Merge branch 'ignite-1.7.4' into master
diff --git a/modules/cassandra/README.txt b/modules/cassandra/README.txt
index cc2134d..146e5d4 100644
--- a/modules/cassandra/README.txt
+++ b/modules/cassandra/README.txt
@@ -1,16 +1,12 @@
 Apache Ignite Cassandra Module
 ------------------------
 
-Apache Ignite Cassandra module provides CacheStore implementation backed by Cassandra database.
-
-To enable Cassandra module when starting a standalone node, move 'optional/ignite-cassandra' folder to
-'libs' folder before running 'ignite.{sh|bat}' script. The content of the module folder will
-be added to classpath in this case.
+Apache Ignite Cassandra module is used just as a parent container for other Cassandra-related modules.
 
 Importing Cassandra Module In Maven Project
 -------------------------------------
 
-If you are using Maven to manage dependencies of your project, you can add Cassandra module
+If you are using Maven to manage dependencies of your project, you can add Cassandra Store module
 dependency like this (replace '${ignite.version}' with actual Ignite version you are
 interested in):
 
diff --git a/modules/cassandra/pom.xml b/modules/cassandra/pom.xml
index 7aac116..733d53c 100644
--- a/modules/cassandra/pom.xml
+++ b/modules/cassandra/pom.xml
@@ -31,307 +31,22 @@
     </parent>
 
     <artifactId>ignite-cassandra</artifactId>
+    <packaging>pom</packaging>
     <version>1.8.0-SNAPSHOT</version>
     <url>http://ignite.apache.org</url>
 
-    <properties>
-        <commons-beanutils.version>1.8.3</commons-beanutils.version>
-        <cassandra-driver.version>3.0.0</cassandra-driver.version>
-        <cassandra-all.version>3.3</cassandra-all.version>
-        <kryo.version>3.0.3</kryo.version>
-        <reflectasm.version>1.10.1</reflectasm.version>
-        <minlog.version>1.3.0</minlog.version>
-        <asm.version>5.0.3</asm.version>
-        <objenesis.version>2.1</objenesis.version>
-        <netty-handler.version>4.0.27.Final</netty-handler.version>
-        <netty-buffer.version>4.0.27.Final</netty-buffer.version>
-        <netty-common.version>4.0.27.Final</netty-common.version>
-        <netty-transport.version>4.0.27.Final</netty-transport.version>
-        <netty-codec.version>4.0.27.Final</netty-codec.version>
-        <guava.version>19.0</guava.version>
-        <metrics-core.version>3.0.2</metrics-core.version>
-    </properties>
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <artifactId>ignite-cassandra-store</artifactId>
+                <groupId>org.apache.ignite</groupId>
+                <version>${project.version}</version>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
 
-    <dependencies>
-        <!-- Apache commons -->
-        <dependency>
-            <groupId>commons-beanutils</groupId>
-            <artifactId>commons-beanutils</artifactId>
-            <version>${commons-beanutils.version}</version>
-        </dependency>
-
-        <!-- Kryo and required dependencies -->
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>kryo</artifactId>
-            <version>${kryo.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>reflectasm</artifactId>
-            <version>${reflectasm.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>minlog</artifactId>
-            <version>${minlog.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.ow2.asm</groupId>
-            <artifactId>asm</artifactId>
-            <version>${asm.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.objenesis</groupId>
-            <artifactId>objenesis</artifactId>
-            <version>${objenesis.version}</version>
-        </dependency>
-
-        <!-- Ignite -->
-        <dependency>
-            <groupId>org.apache.ignite</groupId>
-            <artifactId>ignite-core</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.ignite</groupId>
-            <artifactId>ignite-spring</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.ignite</groupId>
-            <artifactId>ignite-log4j</artifactId>
-            <version>${project.version}</version>
-            <scope>test</scope>
-        </dependency>
-
-        <!-- Cassandra and required dependencies -->
-        <dependency>
-            <groupId>com.datastax.cassandra</groupId>
-            <artifactId>cassandra-driver-core</artifactId>
-            <version>${cassandra-driver.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-handler</artifactId>
-            <version>${netty-handler.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-buffer</artifactId>
-            <version>${netty-buffer.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-common</artifactId>
-            <version>${netty-common.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-transport</artifactId>
-            <version>${netty-transport.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-codec</artifactId>
-            <version>${netty-codec.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-            <version>${guava.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.codahale.metrics</groupId>
-            <artifactId>metrics-core</artifactId>
-            <version>${metrics-core.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.cassandra</groupId>
-            <artifactId>cassandra-all</artifactId>
-            <version>${cassandra-all.version}</version>
-            <scope>test</scope>
-            <exclusions>
-                <exclusion>
-                    <artifactId>log4j-over-slf4j</artifactId>
-                    <groupId>org.slf4j</groupId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-
-        <!-- Apache log4j -->
-        <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
-            <scope>test</scope>
-        </dependency>
-
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.2</version>
-                <configuration>
-                    <source>1.7</source>
-                    <target>1.7</target>
-                    <compilerVersion>1.7</compilerVersion>
-                    <encoding>UTF-8</encoding>
-                    <fork>true</fork>
-                    <debug>false</debug>
-                    <debuglevel>lines,vars,source</debuglevel>
-                    <meminitial>256</meminitial>
-                    <maxmem>512</maxmem>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-dependency-plugin</artifactId>
-                <version>2.10</version>
-                <executions>
-                    <execution>
-                        <id>copy-all-dependencies</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>copy-dependencies</goal>
-                        </goals>
-                        <configuration>
-                            <outputDirectory>${project.build.directory}/tests-package/lib</outputDirectory>
-                            <overWriteReleases>false</overWriteReleases>
-                            <overWriteSnapshots>false</overWriteSnapshots>
-                            <overWriteIfNewer>true</overWriteIfNewer>
-                        </configuration>
-                    </execution>
-<!-- -->
-                    <execution>
-                        <id>copy-main-dependencies</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>copy-dependencies</goal>
-                        </goals>
-                        <configuration>
-                            <outputDirectory>${project.build.directory}/libs</outputDirectory>
-                            <overWriteReleases>false</overWriteReleases>
-                            <overWriteSnapshots>false</overWriteSnapshots>
-                            <overWriteIfNewer>true</overWriteIfNewer>
-                            <excludeTransitive>true</excludeTransitive>
-                            <excludeGroupIds>
-                                org.apache.ignite,org.springframework,org.gridgain
-                            </excludeGroupIds>
-                            <excludeArtifactIds>
-                                commons-logging,slf4j-api,cache-api,slf4j-api,aopalliance
-                            </excludeArtifactIds>
-                            <includeScope>runtime</includeScope>
-                        </configuration>
-                    </execution>
-<!-- -->
-                </executions>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-antrun-plugin</artifactId>
-                <version>1.8</version>
-                <dependencies>
-                    <dependency>
-                        <groupId>ant-contrib</groupId>
-                        <artifactId>ant-contrib</artifactId>
-                        <version>1.0b3</version>
-                        <exclusions>
-                            <exclusion>
-                                <groupId>ant</groupId>
-                                <artifactId>ant</artifactId>
-                            </exclusion>
-                        </exclusions>
-                    </dependency>
-                </dependencies>
-                <executions>
-                    <execution>
-                        <id>package-tests</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>run</goal>
-                        </goals>
-                        <configuration>
-                            <target>
-                                <taskdef resource="net/sf/antcontrib/antlib.xml" />
-                                <if>
-                                    <available file="${project.build.directory}/test-classes" type="dir" />
-                                    <then>
-                                        <copy todir="${project.build.directory}/tests-package/lib">
-                                            <fileset dir="${project.build.directory}">
-                                                <include name="*.jar" />
-                                            </fileset>
-                                        </copy>
-
-                                        <jar destfile="${project.build.directory}/tests-package/lib/${project.artifactId}-${project.version}-tests.jar">
-                                            <fileset dir="${project.build.directory}/test-classes">
-                                                <include name="**/*.class" />
-                                            </fileset>
-                                        </jar>
-
-                                        <copy todir="${project.build.directory}/tests-package/settings">
-                                            <fileset dir="${project.build.directory}/test-classes">
-                                                <include name="**/*.properties" />
-                                                <include name="**/*.xml" />
-                                            </fileset>
-                                        </copy>
-
-                                        <copy todir="${project.build.directory}/tests-package">
-                                            <fileset dir="${project.build.testSourceDirectory}/../scripts">
-                                                <include name="**/*" />
-                                            </fileset>
-                                        </copy>
-
-                                        <fixcrlf srcdir="${project.build.directory}/tests-package" eol="lf" eof="remove">
-                                            <include name="*.sh" />
-                                        </fixcrlf>
-
-                                        <copy todir="${project.build.directory}/tests-package">
-                                            <fileset dir="${project.build.testSourceDirectory}/..">
-                                                <include name="bootstrap/**" />
-                                            </fileset>
-                                        </copy>
-
-                                        <fixcrlf srcdir="${project.build.directory}/tests-package/bootstrap" eol="lf" eof="remove">
-                                            <include name="**" />
-                                        </fixcrlf>
-
-                                        <zip destfile="${project.build.directory}/ignite-cassandra-tests-${project.version}.zip" compress="true" whenempty="create" level="9" encoding="UTF-8" useLanguageEncodingFlag="true" createUnicodeExtraFields="not-encodeable">
-
-                                            <zipfileset dir="${project.build.directory}/tests-package" prefix="ignite-cassandra-tests">
-                                                <exclude name="**/*.sh" />
-                                            </zipfileset>
-
-                                            <zipfileset dir="${project.build.directory}/tests-package" prefix="ignite-cassandra-tests" filemode="555">
-                                                <include name="**/*.sh" />
-                                            </zipfileset>
-                                        </zip>
-                                    </then>
-                                </if>
-                            </target>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-
-        </plugins>
-    </build>
+    <modules>
+        <module>store</module>
+        <module>serializers</module>
+    </modules>
 </project>
diff --git a/modules/cassandra/serializers/README.txt b/modules/cassandra/serializers/README.txt
new file mode 100644
index 0000000..01948ec
--- /dev/null
+++ b/modules/cassandra/serializers/README.txt
@@ -0,0 +1,33 @@
+Apache Ignite Cassandra Serializers Module
+------------------------
+
+Apache Ignite Cassandra Serializers module provides additional serializers to store objects as BLOBs in Cassandra. The
+module can be used as an addition to the Ignite Cassandra Store module.
+
+To enable Cassandra Serializers module when starting a standalone node, move 'optional/ignite-cassandra-serializers' folder to
+'libs' folder before running 'ignite.{sh|bat}' script. The content of the module folder will
+be added to classpath in this case.
+
+Importing Cassandra Serializers Module In Maven Project
+-------------------------------------
+
+If you are using Maven to manage dependencies of your project, you can add Cassandra Serializers module
+dependency like this (replace '${ignite.version}' with actual Ignite version you are
+interested in):
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                        http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    ...
+    <dependencies>
+        ...
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-cassandra-serializers</artifactId>
+            <version>${ignite.version}</version>
+        </dependency>
+        ...
+    </dependencies>
+    ...
+</project>
diff --git a/modules/cassandra/licenses/apache-2.0.txt b/modules/cassandra/serializers/licenses/apache-2.0.txt
similarity index 100%
copy from modules/cassandra/licenses/apache-2.0.txt
copy to modules/cassandra/serializers/licenses/apache-2.0.txt
diff --git a/modules/cassandra/serializers/pom.xml b/modules/cassandra/serializers/pom.xml
new file mode 100644
index 0000000..33be131
--- /dev/null
+++ b/modules/cassandra/serializers/pom.xml
@@ -0,0 +1,129 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+    POM file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.ignite</groupId>
+        <artifactId>ignite-cassandra</artifactId>
+        <version>1.8.0-SNAPSHOT</version>
+        <relativePath>..</relativePath>
+    </parent>
+
+    <artifactId>ignite-cassandra-serializers</artifactId>
+    <version>1.8.0-SNAPSHOT</version>
+    <url>http://ignite.apache.org</url>
+
+    <properties>
+        <kryo.version>3.0.3</kryo.version>
+        <reflectasm.version>1.10.1</reflectasm.version>
+        <minlog.version>1.3.0</minlog.version>
+        <asm.version>5.0.3</asm.version>
+        <objenesis.version>2.1</objenesis.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <artifactId>ignite-cassandra-store</artifactId>
+            <groupId>org.apache.ignite</groupId>
+        </dependency>
+
+        <!-- Kryo and required dependencies -->
+        <dependency>
+            <groupId>com.esotericsoftware</groupId>
+            <artifactId>kryo</artifactId>
+            <version>${kryo.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.esotericsoftware</groupId>
+            <artifactId>reflectasm</artifactId>
+            <version>${reflectasm.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.esotericsoftware</groupId>
+            <artifactId>minlog</artifactId>
+            <version>${minlog.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.ow2.asm</groupId>
+            <artifactId>asm</artifactId>
+            <version>${asm.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.objenesis</groupId>
+            <artifactId>objenesis</artifactId>
+            <version>${objenesis.version}</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.2</version>
+                <configuration>
+                    <source>1.7</source>
+                    <target>1.7</target>
+                    <compilerVersion>1.7</compilerVersion>
+                    <encoding>UTF-8</encoding>
+                    <fork>true</fork>
+                    <debug>false</debug>
+                    <debuglevel>lines,vars,source</debuglevel>
+                    <meminitial>256</meminitial>
+                    <maxmem>512</maxmem>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <version>2.10</version>
+                <executions>
+                    <execution>
+                        <id>copy-main-dependencies</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${project.build.directory}/libs</outputDirectory>
+                            <overWriteReleases>false</overWriteReleases>
+                            <overWriteSnapshots>false</overWriteSnapshots>
+                            <overWriteIfNewer>true</overWriteIfNewer>
+                            <excludeTransitive>true</excludeTransitive>
+                            <excludeArtifactIds>
+                                ignite-cassandra-store
+                            </excludeArtifactIds>
+                            <includeScope>runtime</includeScope>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java b/modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java
similarity index 87%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java
rename to modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java
index 88379de..775e501 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java
+++ b/modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java
@@ -20,10 +20,11 @@
 import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.io.Input;
 import com.esotericsoftware.kryo.io.Output;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.nio.ByteBuffer;
-import org.apache.ignite.IgniteException;
+
 import org.apache.ignite.internal.util.typedef.internal.U;
 
 /**
@@ -36,9 +37,10 @@
     /** */
     private static final int DFLT_BUFFER_SIZE = 4096;
 
-    /** Thread local instance of {@link com.esotericsoftware.kryo.Kryo} */
+    /** Thread local instance of {@link Kryo} */
     private transient ThreadLocal<Kryo> kryos = new ThreadLocal<Kryo>() {
-        protected Kryo initialValue() {
+        /** {@inheritDoc} */
+        @Override protected Kryo initialValue() {
             return new Kryo();
         }
     };
@@ -63,7 +65,7 @@
             return ByteBuffer.wrap(stream.toByteArray());
         }
         catch (Throwable e) {
-            throw new IgniteException("Failed to serialize object of the class '" + obj.getClass().getName() + "'", e);
+            throw new IllegalStateException("Failed to serialize object of the class '" + obj.getClass().getName() + "'", e);
         }
         finally {
             U.closeQuiet(out);
@@ -83,7 +85,7 @@
             return kryos.get().readClassAndObject(in);
         }
         catch (Throwable e) {
-            throw new IgniteException("Failed to deserialize object from byte stream", e);
+            throw new IllegalStateException("Failed to deserialize object from byte stream", e);
         }
         finally {
             U.closeQuiet(in);
diff --git a/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/KryoSerializerTest.java b/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/KryoSerializerTest.java
new file mode 100644
index 0000000..3053c63
--- /dev/null
+++ b/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/KryoSerializerTest.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests;
+
+import java.nio.ByteBuffer;
+import java.util.Date;
+
+import org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Simple test for KryoSerializer.
+ */
+public class KryoSerializerTest {
+    /**
+     * Serialize simple object test.
+     */
+    @Test
+    public void simpleTest() {
+        MyPojo pojo1 = new MyPojo("123", 1, 123423453467L, new Date(), null);
+
+        KryoSerializer ser = new KryoSerializer();
+
+        ByteBuffer buff = ser.serialize(pojo1);
+        MyPojo pojo2 = (MyPojo)ser.deserialize(buff);
+
+        assertEquals("Kryo simple serialization test failed", pojo1, pojo2);
+    }
+
+    /**
+     * Serialize object with cyclic references test.
+     */
+    @Test
+    public void cyclicStructureTest() {
+        MyPojo pojo1 = new MyPojo("123", 1, 123423453467L, new Date(), null);
+        MyPojo pojo2 = new MyPojo("321", 2, 123456L, new Date(), pojo1);
+        pojo1.setRef(pojo2);
+
+        KryoSerializer ser = new KryoSerializer();
+
+        ByteBuffer buff1 = ser.serialize(pojo1);
+        ByteBuffer buff2 = ser.serialize(pojo2);
+
+        MyPojo pojo3 = (MyPojo)ser.deserialize(buff1);
+        MyPojo pojo4 = (MyPojo)ser.deserialize(buff2);
+
+        assertEquals("Kryo cyclic structure serialization test failed", pojo1, pojo3);
+        assertEquals("Kryo cyclic structure serialization test failed", pojo1.getRef(), pojo3.getRef());
+        assertEquals("Kryo cyclic structure serialization test failed", pojo2, pojo4);
+        assertEquals("Kryo cyclic structure serialization test failed", pojo2.getRef(), pojo4.getRef());
+    }
+}
diff --git a/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/MyPojo.java b/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/MyPojo.java
new file mode 100644
index 0000000..f901db3
--- /dev/null
+++ b/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/MyPojo.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests;
+
+import java.io.Serializable;
+import java.util.Date;
+
+/**
+ * Sample POJO for tests.
+ */
+public class MyPojo implements Serializable {
+    /** */
+    private String field1;
+
+    /** */
+    private int field2;
+
+    /** */
+    private long field3;
+
+    /** */
+    private Date field4;
+
+    /** */
+    private MyPojo ref;
+
+    /**
+     * Empty constructor.
+     */
+    public MyPojo() {
+        // No-op.
+    }
+
+    /**
+     * Full constructor.
+     *
+     * @param field1 Some value.
+     * @param field2 Some value.
+     * @param field3 Some value.
+     * @param field4 Some value.
+     * @param ref Reference to other pojo.
+     */
+    public MyPojo(String field1, int field2, long field3, Date field4, MyPojo ref) {
+        this.field1 = field1;
+        this.field2 = field2;
+        this.field3 = field3;
+        this.field4 = field4;
+        this.ref = ref;
+    }
+
+    /**
+     * Compare POJOs.
+     *
+     * @param obj POJO to compare with.
+     * @return {@code true} if equals.
+     */
+    public boolean equals(Object obj) {
+        if (!(obj instanceof MyPojo))
+            return false;
+
+        MyPojo myObj = (MyPojo)obj;
+
+        if ((field1 == null && myObj.field1 != null) ||
+            (field1 != null && !field1.equals(myObj.field1)))
+            return false;
+
+        if ((field4 == null && myObj.field4 != null) ||
+            (field4 != null && !field4.equals(myObj.field4)))
+            return false;
+
+        return field2 == myObj.field2 && field3 == myObj.field3;
+    }
+
+    /**
+     * @param ref New reference.
+     */
+    public void setRef(MyPojo ref) {
+        this.ref = ref;
+    }
+
+    /**
+     * @return Reference to some POJO.
+     */
+    public MyPojo getRef() {
+        return ref;
+    }
+}
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java b/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java
deleted file mode 100644
index e734ca3..0000000
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.Row;
-import java.nio.ByteBuffer;
-import java.util.LinkedList;
-import java.util.List;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
-import org.apache.ignite.cache.store.cassandra.serializer.Serializer;
-
-/**
- * Intermediate layer between persistent store (Cassandra) and Ignite cache key/value classes.
- * Handles  all the mappings to/from Java classes into Cassandra and responsible for all the details
- * of how Java objects should be written/loaded to/from Cassandra.
- */
-public class PersistenceController {
-    /** Ignite cache key/value persistence settings. */
-    private KeyValuePersistenceSettings persistenceSettings;
-
-    /** CQL statement to insert row into Cassandra table. */
-    private String writeStatement;
-
-    /** CQL statement to delete row from Cassandra table. */
-    private String delStatement;
-
-    /** CQL statement to select value fields from Cassandra table. */
-    private String loadStatement;
-
-    /** CQL statement to select key/value fields from Cassandra table. */
-    private String loadStatementWithKeyFields;
-
-    /**
-     * Constructs persistence controller from Ignite cache persistence settings.
-     *
-     * @param settings persistence settings.
-     */
-    public PersistenceController(KeyValuePersistenceSettings settings) {
-        if (settings == null)
-            throw new IllegalArgumentException("Persistent settings can't be null");
-
-        this.persistenceSettings = settings;
-    }
-
-    /**
-     * Returns Ignite cache persistence settings.
-     *
-     * @return persistence settings.
-     */
-    public KeyValuePersistenceSettings getPersistenceSettings() {
-        return persistenceSettings;
-    }
-
-    /**
-     * Returns Cassandra keyspace to use.
-     *
-     * @return keyspace.
-     */
-    public String getKeyspace() {
-        return persistenceSettings.getKeyspace();
-    }
-
-    /**
-     * Returns Cassandra table to use.
-     *
-     * @return table.
-     */
-    public String getTable() {
-        return persistenceSettings.getTable();
-    }
-
-    /**
-     * Returns CQL statement to insert row into Cassandra table.
-     *
-     * @return CQL statement.
-     */
-    public String getWriteStatement() {
-        if (writeStatement != null)
-            return writeStatement;
-
-        List<String> cols = getKeyValueColumns();
-
-        StringBuilder colsList = new StringBuilder();
-        StringBuilder questionsList = new StringBuilder();
-
-        for (String column : cols) {
-            if (colsList.length() != 0) {
-                colsList.append(", ");
-                questionsList.append(",");
-            }
-
-            colsList.append(column);
-            questionsList.append("?");
-        }
-
-        writeStatement = "insert into " + persistenceSettings.getKeyspace() + "." + persistenceSettings.getTable() + " (" +
-            colsList.toString() + ") values (" + questionsList.toString() + ")";
-
-        if (persistenceSettings.getTTL() != null)
-            writeStatement += " using ttl " + persistenceSettings.getTTL();
-
-        writeStatement += ";";
-
-        return writeStatement;
-    }
-
-    /**
-     * Returns CQL statement to delete row from Cassandra table.
-     *
-     * @return CQL statement.
-     */
-    public String getDeleteStatement() {
-        if (delStatement != null)
-            return delStatement;
-
-        List<String> cols = getKeyColumns();
-
-        StringBuilder statement = new StringBuilder();
-
-        for (String column : cols) {
-            if (statement.length() != 0)
-                statement.append(" and ");
-
-            statement.append(column).append("=?");
-        }
-
-        statement.append(";");
-
-        delStatement = "delete from " +
-            persistenceSettings.getKeyspace() + "." +
-            persistenceSettings.getTable() + " where " +
-            statement.toString();
-
-        return delStatement;
-    }
-
-    /**
-     * Returns CQL statement to select key/value fields from Cassandra table.
-     *
-     * @param includeKeyFields whether to include/exclude key fields from the returned row.
-     *
-     * @return CQL statement.
-     */
-    public String getLoadStatement(boolean includeKeyFields) {
-        if (loadStatement != null && loadStatementWithKeyFields != null)
-            return includeKeyFields ? loadStatementWithKeyFields : loadStatement;
-
-        List<String> valCols = getValueColumns();
-
-        List<String> keyCols = getKeyColumns();
-
-        StringBuilder hdrWithKeyFields = new StringBuilder("select ");
-
-        for (int i = 0; i < keyCols.size(); i++) {
-            if (i > 0)
-                hdrWithKeyFields.append(", ");
-
-            hdrWithKeyFields.append(keyCols.get(i));
-        }
-
-        StringBuilder hdr = new StringBuilder("select ");
-
-        for (int i = 0; i < valCols.size(); i++) {
-            if (i > 0)
-                hdr.append(", ");
-
-            hdrWithKeyFields.append(",");
-
-            hdr.append(valCols.get(i));
-            hdrWithKeyFields.append(valCols.get(i));
-        }
-
-        StringBuilder statement = new StringBuilder();
-
-        statement.append(" from ");
-        statement.append(persistenceSettings.getKeyspace());
-        statement.append(".").append(persistenceSettings.getTable());
-        statement.append(" where ");
-
-        for (int i = 0; i < keyCols.size(); i++) {
-            if (i > 0)
-                statement.append(" and ");
-
-            statement.append(keyCols.get(i)).append("=?");
-        }
-
-        statement.append(";");
-
-        loadStatement = hdr.toString() + statement.toString();
-        loadStatementWithKeyFields = hdrWithKeyFields.toString() + statement.toString();
-
-        return includeKeyFields ? loadStatementWithKeyFields : loadStatement;
-    }
-
-    /**
-     * Binds Ignite cache key object to {@link com.datastax.driver.core.PreparedStatement}.
-     *
-     * @param statement statement to which key object should be bind.
-     * @param key key object.
-     *
-     * @return statement with bounded key.
-     */
-    public BoundStatement bindKey(PreparedStatement statement, Object key) {
-        KeyPersistenceSettings settings = persistenceSettings.getKeyPersistenceSettings();
-
-        Object[] values = getBindingValues(settings.getStrategy(),
-            settings.getSerializer(), settings.getFields(), key);
-
-        return statement.bind(values);
-    }
-
-    /**
-     * Binds Ignite cache key and value object to {@link com.datastax.driver.core.PreparedStatement}.
-     *
-     * @param statement statement to which key and value object should be bind.
-     * @param key key object.
-     * @param val value object.
-     *
-     * @return statement with bounded key and value.
-     */
-    public BoundStatement bindKeyValue(PreparedStatement statement, Object key, Object val) {
-        KeyPersistenceSettings keySettings = persistenceSettings.getKeyPersistenceSettings();
-        Object[] keyValues = getBindingValues(keySettings.getStrategy(),
-            keySettings.getSerializer(), keySettings.getFields(), key);
-
-        ValuePersistenceSettings valSettings = persistenceSettings.getValuePersistenceSettings();
-        Object[] valValues = getBindingValues(valSettings.getStrategy(),
-            valSettings.getSerializer(), valSettings.getFields(), val);
-
-        Object[] values = new Object[keyValues.length + valValues.length];
-
-        int i = 0;
-
-        for (Object keyVal : keyValues) {
-            values[i] = keyVal;
-            i++;
-        }
-
-        for (Object valVal : valValues) {
-            values[i] = valVal;
-            i++;
-        }
-
-        return statement.bind(values);
-    }
-
-    /**
-     * Builds Ignite cache key object from returned Cassandra table row.
-     *
-     * @param row Cassandra table row.
-     *
-     * @return key object.
-     */
-    @SuppressWarnings("UnusedDeclaration")
-    public Object buildKeyObject(Row row) {
-        return buildObject(row, persistenceSettings.getKeyPersistenceSettings());
-    }
-
-    /**
-     * Builds Ignite cache value object from Cassandra table row .
-     *
-     * @param row Cassandra table row.
-     *
-     * @return value object.
-     */
-    public Object buildValueObject(Row row) {
-        return buildObject(row, persistenceSettings.getValuePersistenceSettings());
-    }
-
-    /**
-     * Builds object from Cassandra table row.
-     *
-     * @param row Cassandra table row.
-     * @param settings persistence settings to use.
-     *
-     * @return object.
-     */
-    private Object buildObject(Row row, PersistenceSettings settings) {
-        if (row == null)
-            return null;
-
-        PersistenceStrategy stgy = settings.getStrategy();
-
-        Class clazz = settings.getJavaClass();
-
-        String col = settings.getColumn();
-
-        List<PojoField> fields = settings.getFields();
-
-        if (PersistenceStrategy.PRIMITIVE.equals(stgy))
-            return PropertyMappingHelper.getCassandraColumnValue(row, col, clazz, null);
-
-        if (PersistenceStrategy.BLOB.equals(stgy))
-            return settings.getSerializer().deserialize(row.getBytes(col));
-
-        Object obj;
-
-        try {
-            obj = clazz.newInstance();
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Failed to instantiate object of type '" + clazz.getName() + "' using reflection", e);
-        }
-
-        for (PojoField field : fields)
-            field.setValueFromRow(row, obj, settings.getSerializer());
-
-        return obj;
-    }
-
-    /**
-     * Extracts field values from POJO object and converts them into Java types
-     * which could be mapped to Cassandra types.
-     *
-     * @param stgy persistence strategy to use.
-     * @param serializer serializer to use for BLOBs.
-     * @param fields fields who's values should be extracted.
-     * @param obj object instance who's field values should be extracted.
-     *
-     * @return array of object field values converted into Java object instances having Cassandra compatible types
-     */
-    private Object[] getBindingValues(PersistenceStrategy stgy, Serializer serializer, List<PojoField> fields, Object obj) {
-        if (PersistenceStrategy.PRIMITIVE.equals(stgy)) {
-            if (PropertyMappingHelper.getCassandraType(obj.getClass()) == null ||
-                obj.getClass().equals(ByteBuffer.class) || obj instanceof byte[]) {
-                throw new IllegalArgumentException("Couldn't deserialize instance of class '" +
-                    obj.getClass().getName() + "' using PRIMITIVE strategy. Please use BLOB strategy for this case.");
-            }
-
-            return new Object[] {obj};
-        }
-
-        if (PersistenceStrategy.BLOB.equals(stgy))
-            return new Object[] {serializer.serialize(obj)};
-
-        Object[] values = new Object[fields.size()];
-
-        int i = 0;
-
-        for (PojoField field : fields) {
-            Object val = field.getValueFromObject(obj, serializer);
-
-            if (val instanceof byte[])
-                val = ByteBuffer.wrap((byte[]) val);
-
-            values[i] = val;
-
-            i++;
-        }
-
-        return values;
-    }
-
-    /**
-     * Returns list of Cassandra table columns mapped to Ignite cache key and value fields
-     *
-     * @return list of column names
-     */
-    private List<String> getKeyValueColumns() {
-        List<String> cols = getKeyColumns();
-
-        cols.addAll(getValueColumns());
-
-        return cols;
-    }
-
-    /**
-     * Returns list of Cassandra table columns mapped to Ignite cache key fields
-     *
-     * @return list of column names
-     */
-    private List<String> getKeyColumns() {
-        return getColumns(persistenceSettings.getKeyPersistenceSettings());
-    }
-
-    /**
-     * Returns list of Cassandra table columns mapped to Ignite cache value fields
-     *
-     * @return list of column names
-     */
-    private List<String> getValueColumns() {
-        return getColumns(persistenceSettings.getValuePersistenceSettings());
-    }
-
-    /**
-     * Returns list of Cassandra table columns based on persistence strategy to use
-     *
-     * @return list of column names
-     */
-    private List<String> getColumns(PersistenceSettings settings) {
-        List<String> cols = new LinkedList<>();
-
-        if (!PersistenceStrategy.POJO.equals(settings.getStrategy())) {
-            cols.add(settings.getColumn());
-            return cols;
-        }
-
-        for (PojoField field : settings.getFields())
-            cols.add(field.getColumn());
-
-        return cols;
-    }
-}
diff --git a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
deleted file mode 100644
index 298c1b4..0000000
--- a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
+++ /dev/null
@@ -1,373 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
-
-S3_ROOT=s3://bucket/folder
-S3_DOWNLOADS=$S3_ROOT/test
-S3_SYSTEM=$S3_ROOT/test1
-
-CASSANDRA_DOWNLOAD_URL=http://www-eu.apache.org/dist/cassandra/3.5/apache-cassandra-3.5-bin.tar.gz
-CASSANDRA_TARBALL=apache-cassandra-3.5-bin.tar.gz
-CASSANDRA_UNTAR_DIR=apache-cassandra-3.5
-
-TESTS_PACKAGE_DONLOAD_URL=$S3_DOWNLOADS/ignite-cassandra-tests-1.6.0-SNAPSHOT.zip
-TESTS_PACKAGE_ZIP=ignite-cassandra-tests-1.6.0-SNAPSHOT.zip
-TESTS_PACKAGE_UNZIP_DIR=ignite-cassandra-tests
-
-S3_LOGS_URL=$S3_SYSTEM/logs/c-logs
-S3_LOGS_TRIGGER_URL=$S3_SYSTEM/logs-trigger
-S3_BOOTSTRAP_SUCCESS_URL=$S3_SYSTEM/c-success
-S3_BOOTSTRAP_FAILURE_URL=$S3_SYSTEM/c-failure
-S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_SYSTEM/c-discovery
-S3_CASSANDRA_FIRST_NODE_LOCK_URL=$S3_SYSTEM/c-first-node-lock
-S3_CASSANDRA_NODES_JOIN_LOCK_URL=$S3_SYSTEM/c-join-lock
-
-INSTANCE_REGION=us-west-2
-INSTANCE_NAME_TAG=CASSANDRA-SERVER
-INSTANCE_OWNER_TAG=ignite@apache.org
-INSTANCE_PROJECT_TAG=ignite
-
-terminate()
-{
-    if [[ "$S3_BOOTSTRAP_SUCCESS_URL" != */ ]]; then
-        S3_BOOTSTRAP_SUCCESS_URL=${S3_BOOTSTRAP_SUCCESS_URL}/
-    fi
-
-    if [[ "$S3_BOOTSTRAP_FAILURE_URL" != */ ]]; then
-        S3_BOOTSTRAP_FAILURE_URL=${S3_BOOTSTRAP_FAILURE_URL}/
-    fi
-
-    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    msg=$host_name
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Cassandra node bootstrap failed"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-        reportFolder=${S3_BOOTSTRAP_FAILURE_URL}${host_name}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Cassandra node bootstrap successfully completed"
-        echo "[INFO]-----------------------------------------------------"
-        reportFolder=${S3_BOOTSTRAP_SUCCESS_URL}${host_name}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/bootstrap-result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed drop report folder: $reportFolder"
-    fi
-
-    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
-    fi
-
-    rm -f /opt/bootstrap-result
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-tagInstance()
-{
-    export EC2_HOME=/opt/aws/apitools/ec2
-    export JAVA_HOME=/opt/jdk1.8.0_77
-    export PATH=$JAVA_HOME/bin:$EC2_HOME/bin:$PATH
-
-    INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
-    if [ $? -ne 0 ]; then
-        terminate "Failed to get instance metadata to tag it"
-    fi
-
-    if [ -n "$INSTANCE_NAME_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag Name=${INSTANCE_NAME_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: Name=${INSTANCE_NAME_TAG}"
-        fi
-    fi
-
-    if [ -n "$INSTANCE_OWNER_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag owner=${INSTANCE_OWNER_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: owner=${INSTANCE_OWNER_TAG}"
-        fi
-    fi
-
-    if [ -n "$INSTANCE_PROJECT_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag project=${INSTANCE_PROJECT_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: project=${INSTANCE_PROJECT_TAG}"
-        fi
-    fi
-}
-
-downloadPackage()
-{
-    echo "[INFO] Downloading $3 package from $1 into $2"
-
-    if [[ "$1" == s3* ]]; then
-        aws s3 cp $1 $2
-
-        if [ $? -ne 0 ]; then
-            echo "[WARN] Failed to download $3 package from first attempt"
-            rm -Rf $2
-            sleep 10s
-
-            echo "[INFO] Trying second attempt to download $3 package"
-            aws s3 cp $1 $2
-
-            if [ $? -ne 0 ]; then
-                echo "[WARN] Failed to download $3 package from second attempt"
-                rm -Rf $2
-                sleep 10s
-
-                echo "[INFO] Trying third attempt to download $3 package"
-                aws s3 cp $1 $2
-
-                if [ $? -ne 0 ]; then
-                    terminate "All three attempts to download $3 package from $1 are failed"
-                fi
-            fi
-        fi
-    else
-        curl "$1" -o "$2"
-
-        if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-            echo "[WARN] Failed to download $3 package from first attempt"
-            rm -Rf $2
-            sleep 10s
-
-            echo "[INFO] Trying second attempt to download $3 package"
-            curl "$1" -o "$2"
-
-            if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-                echo "[WARN] Failed to download $3 package from second attempt"
-                rm -Rf $2
-                sleep 10s
-
-                echo "[INFO] Trying third attempt to download $3 package"
-                curl "$1" -o "$2"
-
-                if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-                    terminate "All three attempts to download $3 package from $1 are failed"
-                fi
-            fi
-        fi
-    fi
-
-    echo "[INFO] $3 package successfully downloaded from $1 into $2"
-}
-
-if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
-    S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Bootstrapping Cassandra node"
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Cassandra download URL: $CASSANDRA_DOWNLOAD_URL"
-echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
-echo "[INFO] Logs URL: $S3_LOGS_URL"
-echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER_URL"
-echo "[INFO] Cassandra nodes discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-echo "[INFO] Cassandra first node lock URL: $S3_CASSANDRA_FIRST_NODE_LOCK_URL"
-echo "[INFO] Cassandra nodes join lock URL: $S3_CASSANDRA_NODES_JOIN_LOCK_URL"
-echo "[INFO] Bootsrap success URL: $S3_BOOTSTRAP_SUCCESS_URL"
-echo "[INFO] Bootsrap failure URL: $S3_BOOTSTRAP_FAILURE_URL"
-echo "[INFO]-----------------------------------------------------------------"
-
-echo "[INFO] Installing 'wget' package"
-yum -y install wget
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'wget' package"
-fi
-
-echo "[INFO] Installing 'net-tools' package"
-yum -y install net-tools
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'net-tools' package"
-fi
-
-echo "[INFO] Installing 'python' package"
-yum -y install python
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'python' package"
-fi
-
-echo "[INFO] Installing 'unzip' package"
-yum -y install unzip
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'unzip' package"
-fi
-
-rm -Rf /opt/jdk1.8.0_77 /opt/jdk-8u77-linux-x64.tar.gz
-
-echo "[INFO] Downloading 'jdk-8u77'"
-wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz" -O /opt/jdk-8u77-linux-x64.tar.gz
-if [ $? -ne 0 ]; then
-    terminate "Failed to download 'jdk-8u77'"
-fi
-
-echo "[INFO] Unzipping 'jdk-8u77'"
-tar -xvzf /opt/jdk-8u77-linux-x64.tar.gz -C /opt
-if [ $? -ne 0 ]; then
-    terminate "Failed to untar 'jdk-8u77'"
-fi
-
-rm -Rf /opt/jdk-8u77-linux-x64.tar.gz
-
-downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
-
-echo "[INFO] Installing 'pip'"
-python /opt/get-pip.py
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'pip'"
-fi
-
-echo "[INFO] Installing 'awscli'"
-pip install --upgrade awscli
-if [ $? -ne 0 ]; then
-    echo "[ERROR] Failed to install 'awscli' using pip"
-    echo "[INFO] Trying to install awscli using zip archive"
-    echo "[INFO] Downloading awscli zip"
-
-    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
-
-    echo "[INFO] Unzipping awscli zip"
-    unzip /opt/awscli-bundle.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip awscli zip"
-    fi
-
-    rm -Rf /opt/awscli-bundle.zip
-
-    echo "[INFO] Installing awscli"
-    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install awscli"
-    fi
-
-    echo "[INFO] Successfully installed awscli from zip archive"
-fi
-
-tagInstance
-
-echo "[INFO] Creating 'cassandra' group"
-exists=$(cat /etc/group | grep cassandra)
-if [ -z "$exists" ]; then
-    groupadd cassandra
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create 'cassandra' group"
-    fi
-fi
-
-echo "[INFO] Creating 'cassandra' user"
-exists=$(cat /etc/passwd | grep cassandra)
-if [ -z "$exists" ]; then
-    useradd -g cassandra cassandra
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create 'cassandra' user"
-    fi
-fi
-
-rm -Rf /storage/cassandra /opt/cassandra /opt/$CASSANDRA_TARBALL
-
-echo "[INFO] Creating '/storage/cassandra' storage"
-mkdir -p /storage/cassandra
-chown -R cassandra:cassandra /storage/cassandra
-if [ $? -ne 0 ]; then
-    terminate "Failed to setup Cassandra storage dir: /storage/cassandra"
-fi
-
-downloadPackage "$CASSANDRA_DOWNLOAD_URL" "/opt/$CASSANDRA_TARBALL" "Cassandra"
-
-echo "[INFO] Unzipping Cassandra package"
-tar -xvzf /opt/$CASSANDRA_TARBALL -C /opt
-if [ $? -ne 0 ]; then
-    terminate "Failed to untar Cassandra package"
-fi
-
-rm -f /opt/$CASSANDRA_TARBALL /opt/cassandra
-mv /opt/$CASSANDRA_UNTAR_DIR /opt/cassandra
-chown -R cassandra:cassandra /opt/cassandra
-
-downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/$TESTS_PACKAGE_ZIP" "Tests"
-
-unzip /opt/$TESTS_PACKAGE_ZIP -d /opt
-if [ $? -ne 0 ]; then
-    terminate "Failed to unzip tests package: $TESTS_PACKAGE_DONLOAD_URL"
-fi
-
-chown -R cassandra:cassandra /opt/$TESTS_PACKAGE_UNZIP_DIR
-find /opt/$TESTS_PACKAGE_UNZIP_DIR -type f -name "*.sh" -exec chmod ug+x {} \;
-
-if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-env.sh" ]; then
-    terminate "There are no cassandra-env.sh in tests package"
-fi
-
-if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-start.sh" ]; then
-    terminate "There are no cassandra-start.sh in tests package"
-fi
-
-if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-template.yaml" ]; then
-    terminate "There are no cassandra-start.sh in tests package"
-fi
-
-if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh" ]; then
-    terminate "There are no logs-collector.sh in tests package"
-fi
-
-mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-start.sh /opt
-mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-env.sh /opt/cassandra/conf
-mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-template.yaml /opt/cassandra/conf
-mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh /opt
-rm -Rf /opt/$TESTS_PACKAGE_UNZIP_DIR
-chown -R cassandra:cassandra /opt/cassandra /opt/cassandra-start.sh /opt/logs-collector.sh
-
-#profile=/home/cassandra/.bash_profile
-profile=/root/.bash_profile
-
-echo "export JAVA_HOME=/opt/jdk1.8.0_77" >> $profile
-echo "export CASSANDRA_HOME=/opt/cassandra" >> $profile
-echo "export PATH=\$JAVA_HOME/bin:\$CASSANDRA_HOME/bin:\$PATH" >> $profile
-echo "export S3_BOOTSTRAP_SUCCESS_URL=$S3_BOOTSTRAP_SUCCESS_URL" >> $profile
-echo "export S3_BOOTSTRAP_FAILURE_URL=$S3_BOOTSTRAP_FAILURE_URL" >> $profile
-echo "export S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_CASSANDRA_NODES_DISCOVERY_URL" >> $profile
-echo "export S3_CASSANDRA_NODES_JOIN_LOCK_URL=$S3_CASSANDRA_NODES_JOIN_LOCK_URL" >> $profile
-echo "export S3_CASSANDRA_FIRST_NODE_LOCK_URL=$S3_CASSANDRA_FIRST_NODE_LOCK_URL" >> $profile
-
-HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-/opt/logs-collector.sh "/opt/cassandra/logs" "$S3_LOGS_URL/$HOST_NAME" "$S3_LOGS_TRIGGER_URL" > /opt/cassandra/logs-collector.log &
-
-cmd="/opt/cassandra-start.sh"
-
-#sudo -u cassandra -g cassandra sh -c "$cmd | tee /opt/cassandra/start.log"
-
-$cmd | tee /opt/cassandra/start.log
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-start.sh b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-start.sh
deleted file mode 100644
index c73c509..0000000
--- a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-start.sh
+++ /dev/null
@@ -1,550 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#profile=/home/cassandra/.bash_profile
-profile=/root/.bash_profile
-
-. $profile
-
-terminate()
-{
-    if [[ "$S3_BOOTSTRAP_SUCCESS_URL" != */ ]]; then
-        S3_BOOTSTRAP_SUCCESS_URL=${S3_BOOTSTRAP_SUCCESS_URL}/
-    fi
-
-    if [[ "$S3_BOOTSTRAP_FAILURE_URL" != */ ]]; then
-        S3_BOOTSTRAP_FAILURE_URL=${S3_BOOTSTRAP_FAILURE_URL}/
-    fi
-
-    msg=$HOST_NAME
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Failed to start Cassandra node"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-        reportFolder=${S3_BOOTSTRAP_FAILURE_URL}${HOST_NAME}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Cassandra node successfully started"
-        echo "[INFO]-----------------------------------------------------"
-        reportFolder=${S3_BOOTSTRAP_SUCCESS_URL}${HOST_NAME}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/cassandra/start_result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed drop report folder: $reportFolder"
-    fi
-
-    if [ -d "/opt/cassandra/logs" ]; then
-        aws s3 sync --sse AES256 /opt/cassandra/logs $reportFolder
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to export Cassandra logs to: $reportFolder"
-        fi
-    fi
-
-    aws s3 cp --sse AES256 /opt/cassandra/start_result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to export node start result to: $reportFile"
-    fi
-
-    rm -f /opt/cassandra/start_result /opt/cassandra/join-lock /opt/cassandra/remote-join-lock
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-registerNode()
-{
-    echo "[INFO] Registering Cassandra node seed: ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME"
-
-    aws s3 cp --sse AES256 /opt/cassandra/join-lock ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME
-    if [ $? -ne 0 ]; then
-        terminate "Failed to register Cassandra seed info in: ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME"
-    fi
-
-    echo "[INFO] Cassandra node seed successfully registered"
-}
-
-unregisterNode()
-{
-    echo "[INFO] Removing Cassandra node registration from: ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME"
-    aws s3 rm ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME
-    echo "[INFO] Cassandra node registration removed"
-}
-
-cleanupMetadata()
-{
-    echo "[INFO] Running cleanup"
-    aws s3 rm $S3_CASSANDRA_NODES_JOIN_LOCK_URL
-    aws s3 rm --recursive $S3_CASSANDRA_NODES_DISCOVERY_URL
-    aws s3 rm --recursive $S3_BOOTSTRAP_SUCCESS_URL
-    aws s3 rm --recursive $S3_BOOTSTRAP_FAILURE_URL
-    echo "[INFO] Cleanup completed"
-}
-
-setupCassandraSeeds()
-{
-    echo "[INFO] Setting up Cassandra seeds"
-
-    if [ "$FIRST_NODE" == "true" ]; then
-        CASSANDRA_SEEDS=$(hostname -f | tr '[:upper:]' '[:lower:]')
-        echo "[INFO] Using host address as a seed for the first Cassandra node: $CASSANDRA_SEEDS"
-        aws s3 rm --recursive ${S3_CASSANDRA_NODES_DISCOVERY_URL::-1}
-        if [ $? -ne 0 ]; then
-            terminate "Failed to clean Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-        fi
-
-        cat /opt/cassandra/conf/cassandra-template.yaml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" > /opt/cassandra/conf/cassandra.yaml
-
-        return 0
-    fi
-
-    echo "[INFO] Looking for Cassandra seeds in: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-
-    startTime=$(date +%s)
-
-    while true; do
-        seeds=$(aws s3 ls $S3_CASSANDRA_NODES_DISCOVERY_URL | grep -v PRE | sed -r "s/^.* //g")
-        if [ -n "$seeds" ]; then
-            seeds=($seeds)
-            length=${#seeds[@]}
-
-            if [ $length -lt 4 ]; then
-                seed1=${seeds[0]}
-                seed2=${seeds[1]}
-                seed3=${seeds[2]}
-            else
-                pos1=$(($RANDOM%$length))
-                pos2=$(($RANDOM%$length))
-                pos3=$(($RANDOM%$length))
-                seed1=${seeds[${pos1}]}
-                seed2=${seeds[${pos2}]}
-                seed3=${seeds[${pos3}]}
-            fi
-
-            CASSANDRA_SEEDS=$seed1
-            CASSANDRA_SEED=$seed1
-
-            if [ "$seed2" != "$seed1" ] && [ -n "$seed2" ]; then
-                CASSANDRA_SEEDS="$CASSANDRA_SEEDS,$seed2"
-            fi
-
-            if [ "$seed3" != "$seed2" ] && [ "$seed3" != "$seed1" ] && [ -n "$seed3" ]; then
-                CASSANDRA_SEEDS="$CASSANDRA_SEEDS,$seed3"
-            fi
-
-            echo "[INFO] Using Cassandra seeds: $CASSANDRA_SEEDS"
-
-            cat /opt/cassandra/conf/cassandra-template.yaml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" > /opt/cassandra/conf/cassandra.yaml
-
-            return 0
-        fi
-
-        currentTime=$(date +%s)
-        duration=$(( $currentTime-$startTime ))
-        duration=$(( $duration/60 ))
-
-        if [ $duration -gt $NODE_STARTUP_TIME ]; then
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but first Cassandra node is still not up and running"
-        fi
-
-        echo "[INFO] Waiting for the first Cassandra node to start and publish its seed, time passed ${duration}min"
-
-        sleep 1m
-    done
-}
-
-tryToGetFirstNodeLock()
-{
-    echo "[INFO] Trying to get first node lock"
-
-    checkFirstNodeLockExist
-    if [ $? -ne 0 ]; then
-        return 1
-    fi
-
-    createFirstNodeLock
-
-    sleep 5s
-
-    rm -Rf /opt/cassandra/first-node-lock
-
-    aws s3 cp $S3_CASSANDRA_FIRST_NODE_LOCK_URL /opt/cassandra/first-node-lock
-    if [ $? -ne 0 ]; then
-        echo "[WARN] Failed to check just created first node lock"
-        return 1
-    fi
-
-    first_host=$(cat /opt/cassandra/first-node-lock)
-
-    rm -f /opt/cassandra/first-node-lock
-
-    if [ "$first_host" != "$HOST_NAME" ]; then
-        echo "[INFO] Node $first_host has discarded previously created first node lock"
-        return 1
-    fi
-
-    echo "[INFO] Congratulations, got first node lock"
-
-    return 0
-}
-
-checkFirstNodeLockExist()
-{
-    echo "[INFO] Checking for the first node lock"
-
-    lockExists=$(aws s3 ls $S3_CASSANDRA_FIRST_NODE_LOCK_URL)
-    if [ -n "$lockExists" ]; then
-        echo "[INFO] First node lock already exists"
-        return 1
-    fi
-
-    echo "[INFO] First node lock doesn't exist"
-
-    return 0
-}
-
-createFirstNodeLock()
-{
-    aws s3 cp --sse AES256 /opt/cassandra/join-lock $S3_CASSANDRA_FIRST_NODE_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create first node lock"
-    fi
-    echo "[INFO] Created first node lock"
-}
-
-removeFirstNodeLock()
-{
-    aws s3 rm $S3_CASSANDRA_FIRST_NODE_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to remove first node lock"
-    fi
-    echo "[INFO] Removed first node lock"
-}
-
-tryToGetClusterJoinLock()
-{
-    echo "[INFO] Trying to get cluster join lock"
-
-    checkClusterJoinLockExist
-    if [ $? -ne 0 ]; then
-        return 1
-    fi
-
-    createClusterJoinLock
-
-    sleep 5s
-
-    rm -Rf /opt/cassandra/remote-join-lock
-
-    aws s3 cp $S3_CASSANDRA_NODES_JOIN_LOCK_URL /opt/cassandra/remote-join-lock
-    if [ $? -ne 0 ]; then
-        echo "[WARN] Failed to check just created cluster join lock"
-        return 1
-    fi
-
-    join_host=$(cat /opt/cassandra/remote-join-lock)
-
-    if [ "$join_host" != "$HOST_NAME" ]; then
-        echo "[INFO] Node $first_host has discarded previously created cluster join lock"
-        return 1
-    fi
-
-    echo "[INFO] Congratulations, got cluster join lock"
-
-    return 0
-}
-
-checkClusterJoinLockExist()
-{
-    echo "[INFO] Checking for the cluster join lock"
-
-    lockExists=$(aws s3 ls $S3_CASSANDRA_NODES_JOIN_LOCK_URL)
-    if [ -n "$lockExists" ]; then
-        echo "[INFO] Cluster join lock already exists"
-        return 1
-    fi
-
-    status=$(/opt/cassandra/bin/nodetool -h $CASSANDRA_SEED status)
-    leaving=$(echo $status | grep UL)
-    moving=$(echo $status | grep UM)
-    joining=$(echo $status | grep UJ)
-
-    if [ -n "$leaving" ] || [ -n "$moving" ] || [ -n "$joining" ]; then
-        echo "[INFO] Cluster join lock doesn't exist in S3, but some node still trying to join Cassandra cluster"
-        return 1
-    fi
-
-    echo "[INFO] Cluster join lock doesn't exist"
-
-    return 0
-}
-
-createClusterJoinLock()
-{
-    aws s3 cp --sse AES256 /opt/cassandra/join-lock $S3_CASSANDRA_NODES_JOIN_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create cluster join lock"
-    fi
-    echo "[INFO] Created cluster join lock"
-}
-
-removeClusterJoinLock()
-{
-    aws s3 rm $S3_CASSANDRA_NODES_JOIN_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to remove cluster join lock"
-    fi
-    echo "[INFO] Removed cluster join lock"
-}
-
-waitToJoinCassandraCluster()
-{
-    echo "[INFO] Waiting to join Cassandra cluster"
-
-    while true; do
-        tryToGetClusterJoinLock
-
-        if [ $? -ne 0 ]; then
-            echo "[INFO] Another node is trying to join cluster. Waiting for extra 1min."
-            sleep 1m
-        else
-            echo "[INFO]-------------------------------------------------------------"
-            echo "[INFO] Congratulations, got lock to join Cassandra cluster"
-            echo "[INFO]-------------------------------------------------------------"
-            break
-        fi
-    done
-}
-
-waitFirstCassandraNodeRegistered()
-{
-    echo "[INFO] Waiting for the first Cassandra node to register"
-
-    startTime=$(date +%s)
-
-    while true; do
-        first_host=
-
-        exists=$(aws s3 ls $S3_CASSANDRA_FIRST_NODE_LOCK_URL)
-        if [ -n "$exists" ]; then
-            rm -Rf /opt/cassandra/first-node-lock
-
-            aws s3 cp $S3_CASSANDRA_FIRST_NODE_LOCK_URL /opt/cassandra/first-node-lock
-            if [ $? -ne 0 ]; then
-                terminate "Failed to check existing first node lock"
-            fi
-
-            first_host=$(cat /opt/cassandra/first-node-lock)
-
-            rm -Rf /opt/cassandra/first-node-lock
-        fi
-
-        if [ -n "$first_host" ]; then
-            exists=$(aws s3 ls ${S3_CASSANDRA_NODES_DISCOVERY_URL}${first_host})
-            if [ -n "$exists" ]; then
-                break
-            fi
-        fi
-
-        currentTime=$(date +%s)
-        duration=$(( $currentTime-$startTime ))
-        duration=$(( $duration/60 ))
-
-        if [ $duration -gt $NODE_STARTUP_TIME ]; then
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but first Cassandra node is still not up and running"
-        fi
-
-        echo "[INFO] Waiting extra 1min"
-
-        sleep 1m
-    done
-
-    echo "[INFO] First Cassandra node registered"
-}
-
-startCassandra()
-{
-    echo "[INFO]-------------------------------------------------------------"
-    echo "[INFO] Trying attempt $START_ATTEMPT to start Cassandra daemon"
-    echo "[INFO]-------------------------------------------------------------"
-    echo ""
-
-    setupCassandraSeeds
-
-    if [ "$FIRST_NODE" != "true" ]; then
-        waitToJoinCassandraCluster
-    fi
-
-    proc=$(ps -ef | grep java | grep "org.apache.cassandra.service.CassandraDaemon")
-    proc=($proc)
-
-    if [ -n "${proc[1]}" ]; then
-        echo "[INFO] Terminating existing Cassandra process ${proc[1]}"
-        kill -9 ${proc[1]}
-    fi
-
-    echo "[INFO] Starting Cassandra"
-    rm -Rf /opt/cassandra/logs/* /storage/cassandra/*
-    /opt/cassandra/bin/cassandra -R &
-
-    echo "[INFO] Cassandra job id: $!"
-
-    sleep 1m
-
-    START_ATTEMPT=$(( $START_ATTEMPT+1 ))
-}
-
-# Time (in minutes) to wait for the Cassandra node up and running and register it in S3
-NODE_STARTUP_TIME=10
-
-# Number of attempts to start (not first) Cassandra daemon
-NODE_START_ATTEMPTS=3
-
-HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-echo $HOST_NAME > /opt/cassandra/join-lock
-
-START_ATTEMPT=0
-
-FIRST_NODE="false"
-
-unregisterNode
-
-tryToGetFirstNodeLock
-
-if [ $? -eq 0 ]; then
-    FIRST_NODE="true"
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ "$FIRST_NODE" == "true" ]; then
-    echo "[INFO] Starting first Cassandra node"
-else
-    echo "[INFO] Starting Cassandra node"
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Cassandra nodes discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-echo "[INFO] Cassandra first node lock URL: $S3_CASSANDRA_FIRST_NODE_LOCK_URL"
-echo "[INFO] Cassandra nodes join lock URL: $S3_CASSANDRA_NODES_JOIN_LOCK_URL"
-echo "[INFO] Start success URL: $S3_BOOTSTRAP_SUCCESS_URL"
-echo "[INFO] Start failure URL: $S3_BOOTSTRAP_FAILURE_URL"
-echo "[INFO] CASSANDRA_HOME: $CASSANDRA_HOME"
-echo "[INFO] JAVA_HOME: $JAVA_HOME"
-echo "[INFO] PATH: $PATH"
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ -z "$S3_CASSANDRA_NODES_DISCOVERY_URL" ]; then
-    terminate "S3 discovery URL doesn't specified"
-fi
-
-if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
-    S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
-fi
-
-if [ "$FIRST_NODE" != "true" ]; then
-    waitFirstCassandraNodeRegistered
-else
-    cleanupMetadata
-fi
-
-startCassandra
-
-startTime=$(date +%s)
-
-while true; do
-    proc=$(ps -ef | grep java | grep "org.apache.cassandra.service.CassandraDaemon")
-
-    /opt/cassandra/bin/nodetool status &> /dev/null
-
-    if [ $? -eq 0 ]; then
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Cassandra daemon successfully started"
-        echo "[INFO]-----------------------------------------------------"
-        echo $proc
-        echo "[INFO]-----------------------------------------------------"
-
-        if [ "$FIRST_NODE" != "true" ]; then
-            removeClusterJoinLock
-        fi
-
-        break
-    fi
-
-    currentTime=$(date +%s)
-    duration=$(( $currentTime-$startTime ))
-    duration=$(( $duration/60 ))
-
-    if [ $duration -gt $NODE_STARTUP_TIME ]; then
-        if [ "$FIRST_NODE" == "true" ]; then
-            removeFirstNodeLock
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but first Cassandra daemon is still not up and running"
-        else
-            removeClusterJoinLock
-
-            if [ $START_ATTEMPT -gt $NODE_START_ATTEMPTS ]; then
-                terminate "${NODE_START_ATTEMPTS} attempts exceed, but Cassandra daemon is still not up and running"
-            fi
-
-            startCassandra
-        fi
-
-        continue
-    fi
-
-    concurrencyError=$(cat /opt/cassandra/logs/system.log | grep "java.lang.UnsupportedOperationException: Other bootstrapping/leaving/moving nodes detected, cannot bootstrap while cassandra.consistent.rangemovement is true")
-
-    if [ -n "$concurrencyError" ] && [ "$FIRST_NODE" != "true" ]; then
-        removeClusterJoinLock
-        echo "[WARN] Failed to concurrently start Cassandra daemon. Sleeping for extra 1min"
-        sleep 1m
-        startCassandra
-        continue
-    fi
-
-    if [ -z "$proc" ]; then
-        if [ "$FIRST_NODE" == "true" ]; then
-            removeFirstNodeLock
-            terminate "Failed to start Cassandra daemon"
-        fi
-
-        removeClusterJoinLock
-        echo "[WARN] Failed to start Cassandra daemon. Sleeping for extra 1min"
-        sleep 1m
-        startCassandra
-        continue
-    fi
-
-    echo "[INFO] Waiting for Cassandra daemon to start, time passed ${duration}min"
-    sleep 30s
-done
-
-registerNode
-
-terminate
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh b/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh
deleted file mode 100644
index a3a0601..0000000
--- a/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh
+++ /dev/null
@@ -1,384 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
-
-S3_ROOT=s3://bucket/folder
-S3_DOWNLOADS=$S3_ROOT/test
-S3_SYSTEM=$S3_ROOT/test1
-
-IGNITE_DOWNLOAD_URL=$S3_DOWNLOADS/apache-ignite-fabric-1.6.0-SNAPSHOT-bin.zip
-IGNITE_ZIP=apache-ignite-fabric-1.6.0-SNAPSHOT-bin.zip
-IGNITE_UNZIP_DIR=apache-ignite-fabric-1.6.0-SNAPSHOT-bin
-
-TESTS_PACKAGE_DONLOAD_URL=$S3_DOWNLOADS/ignite-cassandra-tests-1.6.0-SNAPSHOT.zip
-TESTS_PACKAGE_ZIP=ignite-cassandra-tests-1.6.0-SNAPSHOT.zip
-TESTS_PACKAGE_UNZIP_DIR=ignite-cassandra-tests
-
-S3_LOGS_URL=$S3_SYSTEM/logs/i-logs
-S3_LOGS_TRIGGER_URL=$S3_SYSTEM/logs-trigger
-S3_BOOTSTRAP_SUCCESS_URL=$S3_SYSTEM/i-success
-S3_BOOTSTRAP_FAILURE_URL=$S3_SYSTEM/i-failure
-S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_SYSTEM/c-discovery
-S3_IGNITE_NODES_DISCOVERY_URL=$S3_SYSTEM/i-discovery
-S3_IGNITE_FIRST_NODE_LOCK_URL=$S3_SYSTEM/i-first-node-lock
-S3_IGNITE_NODES_JOIN_LOCK_URL=$S3_SYSTEM/i-join-lock
-
-INSTANCE_REGION=us-west-2
-INSTANCE_NAME_TAG=IGNITE-SERVER
-INSTANCE_OWNER_TAG=ignite@apache.org
-INSTANCE_PROJECT_TAG=ignite
-
-terminate()
-{
-    if [[ "$S3_BOOTSTRAP_SUCCESS_URL" != */ ]]; then
-        S3_BOOTSTRAP_SUCCESS_URL=${S3_BOOTSTRAP_SUCCESS_URL}/
-    fi
-
-    if [[ "$S3_BOOTSTRAP_FAILURE_URL" != */ ]]; then
-        S3_BOOTSTRAP_FAILURE_URL=${S3_BOOTSTRAP_FAILURE_URL}/
-    fi
-
-    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    msg=$host_name
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Ignite node bootstrap failed"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-        reportFolder=${S3_BOOTSTRAP_FAILURE_URL}${host_name}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Ignite node bootstrap successfully completed"
-        echo "[INFO]-----------------------------------------------------"
-        reportFolder=${S3_BOOTSTRAP_SUCCESS_URL}${host_name}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/bootstrap-result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed drop report folder: $reportFolder"
-    fi
-
-    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
-    fi
-
-    rm -f /opt/bootstrap-result
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-tagInstance()
-{
-    export EC2_HOME=/opt/aws/apitools/ec2
-    export JAVA_HOME=/opt/jdk1.8.0_77
-    export PATH=$JAVA_HOME/bin:$EC2_HOME/bin:$PATH
-
-    INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
-    if [ $? -ne 0 ]; then
-        terminate "Failed to get instance metadata to tag it"
-    fi
-
-    if [ -n "$INSTANCE_NAME_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag Name=${INSTANCE_NAME_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: Name=${INSTANCE_NAME_TAG}"
-        fi
-    fi
-
-    if [ -n "$INSTANCE_OWNER_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag owner=${INSTANCE_OWNER_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: owner=${INSTANCE_OWNER_TAG}"
-        fi
-    fi
-
-    if [ -n "$INSTANCE_PROJECT_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag project=${INSTANCE_PROJECT_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: project=${INSTANCE_PROJECT_TAG}"
-        fi
-    fi
-}
-
-downloadPackage()
-{
-    echo "[INFO] Downloading $3 package from $1 into $2"
-
-    if [[ "$1" == s3* ]]; then
-        aws s3 cp $1 $2
-
-        if [ $? -ne 0 ]; then
-            echo "[WARN] Failed to download $3 package from first attempt"
-            rm -Rf $2
-            sleep 10s
-
-            echo "[INFO] Trying second attempt to download $3 package"
-            aws s3 cp $1 $2
-
-            if [ $? -ne 0 ]; then
-                echo "[WARN] Failed to download $3 package from second attempt"
-                rm -Rf $2
-                sleep 10s
-
-                echo "[INFO] Trying third attempt to download $3 package"
-                aws s3 cp $1 $2
-
-                if [ $? -ne 0 ]; then
-                    terminate "All three attempts to download $3 package from $1 are failed"
-                fi
-            fi
-        fi
-    else
-        curl "$1" -o "$2"
-
-        if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-            echo "[WARN] Failed to download $3 package from first attempt"
-            rm -Rf $2
-            sleep 10s
-
-            echo "[INFO] Trying second attempt to download $3 package"
-            curl "$1" -o "$2"
-
-            if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-                echo "[WARN] Failed to download $3 package from second attempt"
-                rm -Rf $2
-                sleep 10s
-
-                echo "[INFO] Trying third attempt to download $3 package"
-                curl "$1" -o "$2"
-
-                if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-                    terminate "All three attempts to download $3 package from $1 are failed"
-                fi
-            fi
-        fi
-    fi
-
-    echo "[INFO] $3 package successfully downloaded from $1 into $2"
-}
-
-if [[ "$S3_IGNITE_NODES_DISCOVERY_URL" != */ ]]; then
-    S3_IGNITE_NODES_DISCOVERY_URL=${S3_IGNITE_NODES_DISCOVERY_URL}/
-fi
-
-if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
-    S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Bootstrapping Ignite node"
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Ignite download URL: $IGNITE_DOWNLOAD_URL"
-echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
-echo "[INFO] Logs URL: $S3_LOGS_URL"
-echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER_URL"
-echo "[INFO] Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY_URL"
-echo "[INFO] Ignite first node lock URL: $S3_IGNITE_FIRST_NODE_LOCK_URL"
-echo "[INFO] Ignite nodes join lock URL: $S3_IGNITE_NODES_JOIN_LOCK_URL"
-echo "[INFO] Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-echo "[INFO] Bootsrap success URL: $S3_BOOTSTRAP_SUCCESS_URL"
-echo "[INFO] Bootsrap failure URL: $S3_BOOTSTRAP_FAILURE_URL"
-echo "[INFO]-----------------------------------------------------------------"
-
-echo "[INFO] Installing 'wget' package"
-yum -y install wget
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'wget' package"
-fi
-
-echo "[INFO] Installing 'net-tools' package"
-yum -y install net-tools
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'net-tools' package"
-fi
-
-echo "[INFO] Installing 'python' package"
-yum -y install python
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'python' package"
-fi
-
-echo "[INFO] Installing 'unzip' package"
-yum -y install unzip
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'unzip' package"
-fi
-
-rm -Rf /opt/jdk1.8.0_77 /opt/jdk-8u77-linux-x64.tar.gz
-
-echo "[INFO] Downloading 'jdk-8u77'"
-wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz" -O /opt/jdk-8u77-linux-x64.tar.gz
-if [ $? -ne 0 ]; then
-    terminate "Failed to download 'jdk-8u77'"
-fi
-
-echo "[INFO] Unzipping 'jdk-8u77'"
-tar -xvzf /opt/jdk-8u77-linux-x64.tar.gz -C /opt
-if [ $? -ne 0 ]; then
-    terminate "Failed to untar 'jdk-8u77'"
-fi
-
-rm -Rf /opt/jdk-8u77-linux-x64.tar.gz
-
-downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
-
-echo "[INFO] Installing 'pip'"
-python /opt/get-pip.py
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'pip'"
-fi
-
-echo "[INFO] Installing 'awscli'"
-pip install --upgrade awscli
-if [ $? -ne 0 ]; then
-    echo "[ERROR] Failed to install 'awscli' using pip"
-    echo "[INFO] Trying to install awscli using zip archive"
-    echo "[INFO] Downloading awscli zip"
-
-    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
-
-    echo "[INFO] Unzipping awscli zip"
-    unzip /opt/awscli-bundle.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip awscli zip"
-    fi
-
-    rm -fR /opt/awscli-bundle.zip
-
-    echo "[INFO] Installing awscli"
-    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install awscli"
-    fi
-
-    echo "[INFO] Successfully installed awscli from zip archive"
-fi
-
-tagInstance
-
-echo "[INFO] Creating 'ignite' group"
-exists=$(cat /etc/group | grep ignite)
-if [ -z "$exists" ]; then
-    groupadd ignite
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create 'ignite' group"
-    fi
-fi
-
-echo "[INFO] Creating 'ignite' user"
-exists=$(cat /etc/passwd | grep ignite)
-if [ -z "$exists" ]; then
-    useradd -g ignite ignite
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create 'ignite' user"
-    fi
-fi
-
-rm -Rf /opt/ignite /opt/$IGNITE_ZIP
-
-downloadPackage "$IGNITE_DOWNLOAD_URL" "/opt/$IGNITE_ZIP" "Ignite"
-
-echo "[INFO] Unzipping Ignite package"
-unzip /opt/$IGNITE_ZIP -d /opt
-if [ $? -ne 0 ]; then
-    terminate "Failed to unzip Ignite package"
-fi
-
-rm -Rf /opt/$IGNITE_ZIP /opt/ignite-start.sh /opt/ignite-env.sh /opt/ignite
-mv /opt/$IGNITE_UNZIP_DIR /opt/ignite
-chown -R ignite:ignite /opt/ignite
-
-downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/$TESTS_PACKAGE_ZIP" "Tests"
-
-unzip /opt/$TESTS_PACKAGE_ZIP -d /opt
-if [ $? -ne 0 ]; then
-    terminate "Failed to unzip tests package: $TESTS_PACKAGE_DONLOAD_URL"
-fi
-
-chown -R ignite:ignite /opt/$TESTS_PACKAGE_UNZIP_DIR
-find /opt/$TESTS_PACKAGE_UNZIP_DIR -type f -name "*.sh" -exec chmod ug+x {} \;
-
-if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-start.sh" ]; then
-    terminate "There are no ignite-start.sh in tests package"
-fi
-
-if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-cassandra-server-template.xml" ]; then
-    terminate "There are no ignite-cassandra-server-template.xml in tests package"
-fi
-
-if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh" ]; then
-    terminate "There are no logs-collector.sh in tests package"
-fi
-
-testsJar=$(find /opt/$TESTS_PACKAGE_UNZIP_DIR -type f -name "*.jar" | grep ignite-cassandra- | grep tests.jar)
-if [ -n "$testsJar" ]; then
-    echo "[INFO] Coping tests jar $testsJar into /opt/ignite/libs/optional/ignite-cassandra"
-    cp $testsJar /opt/ignite/libs/optional/ignite-cassandra
-    if [ $? -ne 0 ]; then
-        terminate "Failed copy $testsJar into /opt/ignite/libs/optional/ignite-cassandra"
-    fi
-fi
-
-mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-start.sh /opt
-mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-cassandra-server-template.xml /opt/ignite/config
-mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh /opt
-
-if [ -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-env.sh" ]; then
-    mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-env.sh /opt
-    chown -R ignite:ignite /opt/ignite-env.sh
-fi
-
-rm -Rf /opt/$TESTS_PACKAGE_UNZIP_DIR
-chown -R ignite:ignite /opt/ignite-start.sh /opt/logs-collector.sh /opt/ignite/config/ignite-cassandra-server-template.xml
-
-#profile=/home/ignite/.bash_profile
-profile=/root/.bash_profile
-
-echo "export JAVA_HOME=/opt/jdk1.8.0_77" >> $profile
-echo "export IGNITE_HOME=/opt/ignite" >> $profile
-echo "export USER_LIBS=\$IGNITE_HOME/libs/optional/ignite-cassandra/*:\$IGNITE_HOME/libs/optional/ignite-slf4j/*" >> $profile
-echo "export PATH=\$JAVA_HOME/bin:\IGNITE_HOME/bin:\$PATH" >> $profile
-echo "export S3_BOOTSTRAP_SUCCESS_URL=$S3_BOOTSTRAP_SUCCESS_URL" >> $profile
-echo "export S3_BOOTSTRAP_FAILURE_URL=$S3_BOOTSTRAP_FAILURE_URL" >> $profile
-echo "export S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_CASSANDRA_NODES_DISCOVERY_URL" >> $profile
-echo "export S3_IGNITE_NODES_DISCOVERY_URL=$S3_IGNITE_NODES_DISCOVERY_URL" >> $profile
-echo "export S3_IGNITE_NODES_JOIN_LOCK_URL=$S3_IGNITE_NODES_JOIN_LOCK_URL" >> $profile
-echo "export S3_IGNITE_FIRST_NODE_LOCK_URL=$S3_IGNITE_FIRST_NODE_LOCK_URL" >> $profile
-
-HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-/opt/logs-collector.sh "/opt/ignite/work/log" "$S3_LOGS_URL/$HOST_NAME" "$S3_LOGS_TRIGGER_URL" > /opt/ignite/logs-collector.log &
-
-cmd="/opt/ignite-start.sh"
-
-#sudo -u ignite -g ignite sh -c "$cmd | tee /opt/ignite/start.log"
-
-$cmd | tee /opt/ignite/start.log
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-start.sh b/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-start.sh
deleted file mode 100644
index bb1ff0c..0000000
--- a/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-start.sh
+++ /dev/null
@@ -1,637 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#profile=/home/ignite/.bash_profile
-profile=/root/.bash_profile
-
-. $profile
-
-terminate()
-{
-    if [[ "$S3_BOOTSTRAP_SUCCESS_URL" != */ ]]; then
-        S3_BOOTSTRAP_SUCCESS_URL=${S3_BOOTSTRAP_SUCCESS_URL}/
-    fi
-
-    if [[ "$S3_BOOTSTRAP_FAILURE_URL" != */ ]]; then
-        S3_BOOTSTRAP_FAILURE_URL=${S3_BOOTSTRAP_FAILURE_URL}/
-    fi
-
-    msg=$HOST_NAME
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Failed to start Ignite node"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-        reportFolder=${S3_BOOTSTRAP_FAILURE_URL}${HOST_NAME}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Ignite node successfully started"
-        echo "[INFO]-----------------------------------------------------"
-        reportFolder=${S3_BOOTSTRAP_SUCCESS_URL}${HOST_NAME}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/ignite/start_result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed drop report folder: $reportFolder"
-    fi
-
-    if [ -d "/opt/ignite/work/log" ]; then
-        aws s3 sync --sse AES256 /opt/ignite/work/log $reportFolder
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to export Ignite logs to: $reportFolder"
-        fi
-    fi
-
-    aws s3 cp --sse AES256 /opt/ignite/start_result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to export node start result to: $reportFile"
-    fi
-
-    rm -f /opt/ignite/start_result /opt/ignite/join-lock /opt/ignite/remote-join-lock
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-registerNode()
-{
-    echo "[INFO] Registering Ignite node seed: ${S3_IGNITE_NODES_DISCOVERY_URL}$HOST_NAME"
-
-    aws s3 cp --sse AES256 /opt/ignite/join-lock ${S3_IGNITE_NODES_DISCOVERY_URL}$HOST_NAME
-    if [ $? -ne 0 ]; then
-        terminate "Failed to register Ignite node seed: ${S3_IGNITE_NODES_DISCOVERY_URL}$HOST_NAME"
-    fi
-
-    echo "[INFO] Ignite node seed successfully registered"
-}
-
-unregisterNode()
-{
-    echo "[INFO] Removing Ignite node registration from: ${S3_IGNITE_NODES_DISCOVERY_URL}$HOST_NAME"
-    aws s3 rm ${S3_IGNITE_NODES_DISCOVERY_URL}$HOST_NAME
-    echo "[INFO] Ignite node registration removed"
-}
-
-cleanupMetadata()
-{
-    echo "[INFO] Running cleanup"
-    aws s3 rm $S3_IGNITE_NODES_JOIN_LOCK_URL
-    aws s3 rm --recursive $S3_IGNITE_NODES_DISCOVERY_URL
-    aws s3 rm --recursive $S3_BOOTSTRAP_SUCCESS_URL
-    aws s3 rm --recursive $S3_BOOTSTRAP_FAILURE_URL
-    echo "[INFO] Cleanup completed"
-}
-
-setupCassandraSeeds()
-{
-    echo "[INFO] Setting up Cassandra seeds"
-
-    echo "[INFO] Looking for Cassandra seeds in: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-
-    startTime=$(date +%s)
-
-    while true; do
-        seeds=$(aws s3 ls $S3_CASSANDRA_NODES_DISCOVERY_URL | grep -v PRE | sed -r "s/^.* //g")
-        if [ -n "$seeds" ]; then
-            seeds=($seeds)
-            length=${#seeds[@]}
-
-            if [ $length -lt 4 ]; then
-                seed1=${seeds[0]}
-                seed2=${seeds[1]}
-                seed3=${seeds[2]}
-            else
-                pos1=$(($RANDOM%$length))
-                pos2=$(($RANDOM%$length))
-                pos3=$(($RANDOM%$length))
-                seed1=${seeds[${pos1}]}
-                seed2=${seeds[${pos2}]}
-                seed3=${seeds[${pos3}]}
-            fi
-
-            CASSANDRA_SEEDS="<value>$seed1<\/value>"
-
-            if [ "$seed2" != "$seed1" ] && [ -n "$seed2" ]; then
-                CASSANDRA_SEEDS="$CASSANDRA_SEEDS<value>$seed2<\/value>"
-            fi
-
-            if [ "$seed3" != "$seed2" ] && [ "$seed3" != "$seed1" ] && [ -n "$seed3" ]; then
-                CASSANDRA_SEEDS="$CASSANDRA_SEEDS<value>$seed3<\/value>"
-            fi
-
-            echo "[INFO] Using Cassandra seeds: $CASSANDRA_SEEDS"
-
-            cat /opt/ignite/config/ignite-cassandra-server-template.xml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" > /opt/ignite/config/ignite-cassandra-server.xml
-
-            return 0
-        fi
-
-        currentTime=$(date +%s)
-        duration=$(( $currentTime-$startTime ))
-        duration=$(( $duration/60 ))
-
-        if [ $duration -gt $NODE_STARTUP_TIME ]; then
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but no Cassandra nodes is up and running"
-        fi
-
-        echo "[INFO] Waiting for the first Cassandra node to start and publish its seed, time passed ${duration}min"
-
-        sleep 1m
-    done
-}
-
-setupIgniteSeeds()
-{
-    echo "[INFO] Setting up Ignite seeds"
-
-    if [ "$FIRST_NODE" == "true" ]; then
-        IGNITE_SEEDS="<value>127.0.0.1:47500..47509<\/value>"
-        echo "[INFO] Using localhost address as a seed for the first Ignite node: $IGNITE_SEEDS"
-        aws s3 rm --recursive ${S3_IGNITE_NODES_DISCOVERY_URL::-1}
-        if [ $? -ne 0 ]; then
-            terminate "Failed to clean Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY_URL"
-        fi
-
-        cat /opt/ignite/config/ignite-cassandra-server.xml | sed -r "s/\\\$\{IGNITE_SEEDS\}/$IGNITE_SEEDS/g" > /opt/ignite/config/ignite-cassandra-server1.xml
-        mv -f /opt/ignite/config/ignite-cassandra-server1.xml /opt/ignite/config/ignite-cassandra-server.xml
-
-        return 0
-    fi
-
-    echo "[INFO] Looking for Ignite seeds in: $S3_IGNITE_NODES_DISCOVERY_URL"
-
-    startTime=$(date +%s)
-
-    while true; do
-        seeds=$(aws s3 ls $S3_IGNITE_NODES_DISCOVERY_URL | grep -v PRE | sed -r "s/^.* //g")
-        if [ -n "$seeds" ]; then
-            seeds=($seeds)
-            length=${#seeds[@]}
-
-            if [ $length -lt 4 ]; then
-                seed1=${seeds[0]}
-                seed2=${seeds[1]}
-                seed3=${seeds[2]}
-            else
-                pos1=$(($RANDOM%$length))
-                pos2=$(($RANDOM%$length))
-                pos3=$(($RANDOM%$length))
-                seed1=${seeds[${pos1}]}
-                seed2=${seeds[${pos2}]}
-                seed3=${seeds[${pos3}]}
-            fi
-
-            IGNITE_SEEDS="<value>$seed1<\/value>"
-
-            if [ "$seed2" != "$seed1" ] && [ -n "$seed2" ]; then
-                IGNITE_SEEDS="$IGNITE_SEEDS<value>$seed2<\/value>"
-            fi
-
-            if [ "$seed3" != "$seed2" ] && [ "$seed3" != "$seed1" ] && [ -n "$seed3" ]; then
-                IGNITE_SEEDS="$IGNITE_SEEDS<value>$seed3<\/value>"
-            fi
-
-            echo "[INFO] Using Ignite seeds: $IGNITE_SEEDS"
-
-            cat /opt/ignite/config/ignite-cassandra-server.xml | sed -r "s/\\\$\{IGNITE_SEEDS\}/$IGNITE_SEEDS/g" > /opt/ignite/config/ignite-cassandra-server1.xml
-            mv -f /opt/ignite/config/ignite-cassandra-server1.xml /opt/ignite/config/ignite-cassandra-server.xml
-
-            return 0
-        fi
-
-        currentTime=$(date +%s)
-        duration=$(( $currentTime-$startTime ))
-        duration=$(( $duration/60 ))
-
-        if [ $duration -gt $NODE_STARTUP_TIME ]; then
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but no Ignite nodes is up and running"
-        fi
-
-        echo "[INFO] Waiting for the first Ignite node to start and publish its seed, time passed ${duration}min"
-
-        sleep 1m
-    done
-}
-
-tryToGetFirstNodeLock()
-{
-    echo "[INFO] Trying to get first node lock"
-
-    checkFirstNodeLockExist
-    if [ $? -ne 0 ]; then
-        return 1
-    fi
-
-    createFirstNodeLock
-
-    sleep 5s
-
-    rm -Rf /opt/ignite/first-node-lock
-
-    aws s3 cp $S3_IGNITE_FIRST_NODE_LOCK_URL /opt/ignite/first-node-lock
-    if [ $? -ne 0 ]; then
-        echo "[WARN] Failed to check just created first node lock"
-        return 1
-    fi
-
-    first_host=$(cat /opt/ignite/first-node-lock)
-
-    rm -f /opt/ignite/first-node-lock
-
-    if [ "$first_host" != "$HOST_NAME" ]; then
-        echo "[INFO] Node $first_host has discarded previously created first node lock"
-        return 1
-    fi
-
-    echo "[INFO] Congratulations, got first node lock"
-
-    return 0
-}
-
-checkFirstNodeLockExist()
-{
-    echo "[INFO] Checking for the first node lock"
-
-    lockExists=$(aws s3 ls $S3_IGNITE_FIRST_NODE_LOCK_URL)
-    if [ -n "$lockExists" ]; then
-        echo "[INFO] First node lock already exists"
-        return 1
-    fi
-
-    echo "[INFO] First node lock doesn't exist yet"
-
-    return 0
-}
-
-createFirstNodeLock()
-{
-    aws s3 cp --sse AES256 /opt/ignite/join-lock $S3_IGNITE_FIRST_NODE_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create first node lock"
-    fi
-    echo "[INFO] Created first node lock"
-}
-
-removeFirstNodeLock()
-{
-    aws s3 rm $S3_IGNITE_FIRST_NODE_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to remove first node lock"
-    fi
-    echo "[INFO] Removed first node lock"
-}
-
-tryToGetClusterJoinLock()
-{
-    echo "[INFO] Trying to get cluster join lock"
-
-    checkClusterJoinLockExist
-    if [ $? -ne 0 ]; then
-        return 1
-    fi
-
-    createClusterJoinLock
-
-    sleep 5s
-
-    rm -Rf /opt/ignite/remote-join-lock
-
-    aws s3 cp $S3_IGNITE_NODES_JOIN_LOCK_URL /opt/ignite/remote-join-lock
-    if [ $? -ne 0 ]; then
-        echo "[WARN] Failed to check just created cluster join lock"
-        return 1
-    fi
-
-    join_host=$(cat /opt/ignite/remote-join-lock)
-
-    if [ "$join_host" != "$HOST_NAME" ]; then
-        echo "[INFO] Node $first_host has discarded previously created cluster join lock"
-        return 1
-    fi
-
-    echo "[INFO] Congratulations, got cluster join lock"
-
-    return 0
-}
-
-checkClusterJoinLockExist()
-{
-    echo "[INFO] Checking for the cluster join lock"
-
-    lockExists=$(aws s3 ls $S3_IGNITE_NODES_JOIN_LOCK_URL)
-    if [ -n "$lockExists" ]; then
-        echo "[INFO] Cluster join lock already exists"
-        return 1
-    fi
-
-    echo "[INFO] Cluster join lock doesn't exist"
-
-    return 0
-}
-
-createClusterJoinLock()
-{
-    aws s3 cp --sse AES256 /opt/ignite/join-lock $S3_IGNITE_NODES_JOIN_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create cluster join lock"
-    fi
-    echo "[INFO] Created cluster join lock"
-}
-
-removeClusterJoinLock()
-{
-    aws s3 rm $S3_IGNITE_NODES_JOIN_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to remove cluster join lock"
-    fi
-    echo "[INFO] Removed cluster join lock"
-}
-
-waitToJoinIgniteCluster()
-{
-    echo "[INFO] Waiting to join Ignite cluster"
-
-    while true; do
-        tryToGetClusterJoinLock
-
-        if [ $? -ne 0 ]; then
-            echo "[INFO] Another node is trying to join cluster. Waiting for extra 1min."
-            sleep 1m
-        else
-            echo "[INFO]-------------------------------------------------------------"
-            echo "[INFO] Congratulations, got lock to join Ignite cluster"
-            echo "[INFO]-------------------------------------------------------------"
-            break
-        fi
-    done
-}
-
-checkIgniteStatus()
-{
-    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
-
-    nodeId=
-    nodeAddrs=
-    nodePorts=
-    topology=
-    metrics=
-
-    logFile=$(ls /opt/ignite/work/log/ | grep "\.log$")
-    if [ -n "$logFile" ]; then
-        logFile=/opt/ignite/work/log/$logFile
-        nodeId=$(cat $logFile | grep "Local node \[ID")
-        nodeAddrs=$(cat $logFile | grep "Local node addresses:")
-        nodePorts=$(cat $logFile | grep "Local ports:")
-        topology=$(cat $logFile | grep "Topology snapshot")
-        metrics=$(cat $logFile | grep "Metrics for local node" | head -n 1)
-    fi
-
-    if [ -n "$nodeId" ] && [ -n "$nodeAddrs" ] && [ -n "$nodePorts" ] && [ -n "$topology" ] && [ -n "$metrics" ] && [ -n "$proc" ]; then
-        sleep 30s
-        return 0
-    fi
-
-    return 1
-}
-
-waitFirstIgniteNodeRegistered()
-{
-    echo "[INFO] Waiting for the first Ignite node to register"
-
-    startTime=$(date +%s)
-
-    while true; do
-        first_host=
-
-        exists=$(aws s3 ls $S3_IGNITE_FIRST_NODE_LOCK_URL)
-        if [ -n "$exists" ]; then
-            rm -Rf /opt/ignite/first-node-lock
-
-            aws s3 cp $S3_IGNITE_FIRST_NODE_LOCK_URL /opt/ignite/first-node-lock
-            if [ $? -ne 0 ]; then
-                terminate "Failed to check existing first node lock"
-            fi
-
-            first_host=$(cat /opt/ignite/first-node-lock)
-
-            rm -Rf /opt/ignite/first-node-lock
-        fi
-
-        if [ -n "$first_host" ]; then
-            exists=$(aws s3 ls ${S3_IGNITE_NODES_DISCOVERY_URL}${first_host})
-            if [ -n "$exists" ]; then
-                break
-            fi
-        fi
-
-        currentTime=$(date +%s)
-        duration=$(( $currentTime-$startTime ))
-        duration=$(( $duration/60 ))
-
-        if [ $duration -gt $NODE_STARTUP_TIME ]; then
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but first Ignite node is still not up and running"
-        fi
-
-        echo "[INFO] Waiting extra 1min"
-
-        sleep 1m
-    done
-
-    echo "[INFO] First Ignite node registered"
-}
-
-startIgnite()
-{
-    echo "[INFO]-------------------------------------------------------------"
-    echo "[INFO] Trying attempt $START_ATTEMPT to start Ignite daemon"
-    echo "[INFO]-------------------------------------------------------------"
-    echo ""
-
-    setupCassandraSeeds
-    setupIgniteSeeds
-
-    if [ "$FIRST_NODE" == "true" ]; then
-        aws s3 rm --recursive ${S3_IGNITE_NODES_DISCOVERY_URL::-1}
-        if [ $? -ne 0 ]; then
-            terminate "Failed to clean Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY_URL"
-        fi
-    else
-        waitToJoinIgniteCluster
-    fi
-
-    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
-    proc=($proc)
-
-    if [ -n "${proc[1]}" ]; then
-        echo "[INFO] Terminating existing Ignite process ${proc[1]}"
-        kill -9 ${proc[1]}
-    fi
-
-    echo "[INFO] Starting Ignite"
-    rm -Rf /opt/ignite/work/*
-    /opt/ignite/bin/ignite.sh /opt/ignite/config/ignite-cassandra-server.xml &
-
-    echo "[INFO] Ignite job id: $!"
-
-    sleep 1m
-
-    START_ATTEMPT=$(( $START_ATTEMPT+1 ))
-}
-
-# Time (in minutes) to wait for Ignite/Cassandra daemon up and running
-NODE_STARTUP_TIME=10
-
-# Number of attempts to start (not first) Ignite daemon
-NODE_START_ATTEMPTS=3
-
-HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-echo $HOST_NAME > /opt/ignite/join-lock
-
-START_ATTEMPT=0
-
-FIRST_NODE="false"
-
-unregisterNode
-
-tryToGetFirstNodeLock
-
-if [ $? -eq 0 ]; then
-    FIRST_NODE="true"
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ "$FIRST_NODE" == "true" ]; then
-    echo "[INFO] Starting first Ignite node"
-else
-    echo "[INFO] Starting Ignite node"
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Ignite nodes discovery URL: $S3_IGNITE_NODES_DISCOVERY_URL"
-echo "[INFO] Ignite first node lock URL: $S3_IGNITE_FIRST_NODE_LOCK_URL"
-echo "[INFO] Cassandra nodes discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-echo "[INFO] Start success URL: $S3_BOOTSTRAP_SUCCESS_URL"
-echo "[INFO] Start failure URL: $S3_BOOTSTRAP_FAILURE_URL"
-echo "[INFO] IGNITE_HOME: $IGNITE_HOME"
-echo "[INFO] JAVA_HOME: $JAVA_HOME"
-echo "[INFO] PATH: $PATH"
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ -z "$S3_CASSANDRA_NODES_DISCOVERY_URL" ]; then
-    terminate "Cassandra S3 discovery URL doesn't specified"
-fi
-
-if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
-    S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
-fi
-
-if [ -z "$S3_IGNITE_NODES_DISCOVERY_URL" ]; then
-    terminate "Ignite S3 discovery URL doesn't specified"
-fi
-
-if [[ "$S3_IGNITE_NODES_DISCOVERY_URL" != */ ]]; then
-    S3_IGNITE_NODES_DISCOVERY_URL=${S3_IGNITE_NODES_DISCOVERY_URL}/
-fi
-
-if [ "$FIRST_NODE" != "true" ]; then
-    waitFirstIgniteNodeRegistered
-else
-    cleanupMetadata
-fi
-
-envScript=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/ignite-env.sh)
-if [ -f "$envScript" ]; then
-    . $envScript
-fi
-
-startIgnite
-
-startTime=$(date +%s)
-
-while true; do
-    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
-
-    checkIgniteStatus
-
-    if [ $? -eq 0 ]; then
-        sleep 1m
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Ignite daemon successfully started"
-        echo "[INFO]-----------------------------------------------------"
-        echo $proc
-        echo "[INFO]-----------------------------------------------------"
-
-        if [ "$FIRST_NODE" != "true" ]; then
-            removeClusterJoinLock
-        fi
-
-        break
-    fi
-
-    currentTime=$(date +%s)
-    duration=$(( $currentTime-$startTime ))
-    duration=$(( $duration/60 ))
-
-    if [ $duration -gt $NODE_STARTUP_TIME ]; then
-        if [ "$FIRST_NODE" == "true" ]; then
-            removeFirstNodeLock
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but first Ignite daemon is still not up and running"
-        else
-            removeClusterJoinLock
-
-            if [ $START_ATTEMPT -gt $NODE_START_ATTEMPTS ]; then
-                terminate "${NODE_START_ATTEMPTS} attempts exceed, but Ignite daemon is still not up and running"
-            fi
-
-            startIgnite
-        fi
-
-        continue
-    fi
-
-    if [ -z "$proc" ]; then
-        if [ "$FIRST_NODE" == "true" ]; then
-            removeFirstNodeLock
-            terminate "Failed to start Ignite daemon"
-        fi
-
-        removeClusterJoinLock
-        echo "[WARN] Failed to start Ignite daemon. Sleeping for extra 1min"
-        sleep 1m
-        startIgnite
-        continue
-    fi
-
-    echo "[INFO] Waiting for Ignite daemon to start, time passed ${duration}min"
-    sleep 30s
-done
-
-registerNode
-
-terminate
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/logs-collector.sh b/modules/cassandra/src/test/bootstrap/aws/logs-collector.sh
deleted file mode 100644
index 73e3c2c..0000000
--- a/modules/cassandra/src/test/bootstrap/aws/logs-collector.sh
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-echo "[INFO] Running Logs collector service"
-
-if [ -z "$1" ]; then
-    echo "[ERROR] Local logs directory doesn't specified"
-    exit 1
-fi
-
-echo "[INFO] Local logs directory: $1"
-
-if [ -z "$2" ]; then
-    echo "[ERROR] S3 folder where to upload logs doesn't specified"
-    exit 1
-fi
-
-echo "[INFO] S3 logs upload folder: $2"
-
-if [ -z "$3" ]; then
-    echo "[ERROR] Logs collection S3 trigger URL doesn't specified"
-    exit 1
-fi
-
-echo "[INFO] Logs collection S3 trigger URL: $3"
-
-echo "--------------------------------------------------------------------"
-
-TRIGGER_STATE=
-
-while true; do
-    sleep 1m
-
-    STATE=$(aws s3 ls $3)
-
-    if [ -z "$STATE" ] || [ "$STATE" == "$TRIGGER_STATE" ]; then
-        continue
-    fi
-
-    TRIGGER_STATE=$STATE
-
-    exists=
-    if [ -d "$1" ]; then
-        exists="true"
-    fi
-
-    echo "[INFO] Uploading logs from $1 to $2"
-
-    if [ "$exists" != "true" ]; then
-        echo "[INFO] Local logs directory $1 doesn't exist, thus there is nothing to upload"
-    fi
-
-    echo "--------------------------------------------------------------------"
-
-    if [ "$exists" != "true" ]; then
-        continue
-    fi
-
-    aws s3 sync --sse AES256 --delete "$1" "$2"
-
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to upload logs from $1 to $2 from first attempt"
-        sleep 30s
-
-        aws s3 sync --sse AES256 --delete "$1" "$2"
-
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to upload logs from $1 to $2 from second attempt"
-            sleep 1m
-
-            aws s3 sync --sse AES256 --delete "$1" "$2"
-
-            if [ $? -ne 0 ]; then
-                echo "[ERROR] Failed to upload logs from $1 to $2 from third attempt"
-            else
-                echo "[INFO] Logs successfully uploaded from $1 to $2 from third attempt"
-            fi
-        else
-            echo "[INFO] Logs successfully uploaded from $1 to $2 from second attempt"
-        fi
-    else
-        echo "[INFO] Logs successfully uploaded from $1 to $2"
-    fi
-
-    echo "--------------------------------------------------------------------"
-done
diff --git a/modules/cassandra/src/test/bootstrap/aws/tests/tests-bootstrap.sh b/modules/cassandra/src/test/bootstrap/aws/tests/tests-bootstrap.sh
deleted file mode 100644
index d00ddb6..0000000
--- a/modules/cassandra/src/test/bootstrap/aws/tests/tests-bootstrap.sh
+++ /dev/null
@@ -1,379 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
-
-S3_ROOT=s3://bucket/folder
-S3_DOWNLOADS=$S3_ROOT/test
-S3_SYSTEM=$S3_ROOT/test1
-
-TESTS_PACKAGE_DONLOAD_URL=$S3_DOWNLOADS/ignite-cassandra-tests-1.6.0-SNAPSHOT.zip
-TESTS_PACKAGE_ZIP=ignite-cassandra-tests-1.6.0-SNAPSHOT.zip
-TESTS_PACKAGE_UNZIP_DIR=ignite-cassandra-tests
-
-S3_LOGS_URL=$S3_SYSTEM/logs/t-logs
-S3_LOGS_TRIGGER_URL=$S3_SYSTEM/logs-trigger
-S3_IGNITE_NODES_DISCOVERY_URL=$S3_SYSTEM/i-discovery
-S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_SYSTEM/c-discovery
-S3_TEST_NODES_DISCOVERY_URL=$S3_SYSTEM/t-discovery
-S3_TESTS_SUCCESS_URL=$S3_SYSTEM/t-success
-S3_TESTS_FAILURE_URL=$S3_SYSTEM/t-failure
-S3_TESTS_RUNNING_URL=$S3_SYSTEM/t-running
-S3_TESTS_WAITING_URL=$S3_SYSTEM/t-waiting
-S3_IGNITE_SUCCESS_URL=$S3_SYSTEM/i-success
-S3_IGNITE_FAILURE_URL=$S3_SYSTEM/i-failure
-S3_CASSANDRA_SUCCESS_URL=$S3_SYSTEM/c-success
-S3_CASSANDRA_FAILURE_URL=$S3_SYSTEM/c-failure
-S3_TESTS_FIRST_NODE_LOCK_URL=$S3_SYSTEM/t-first-node-lock
-S3_TESTS_SUMMARY_URL=$S3_SYSTEM/t-summary.zip
-
-INSTANCE_REGION=us-west-2
-INSTANCE_NAME_TAG=TEST-SERVER
-INSTANCE_OWNER_TAG=ignite@apache.org
-INSTANCE_PROJECT_TAG=ignite
-
-CASSANDRA_NODES_COUNT=50
-IGNITE_NODES_COUNT=30
-TEST_NODES_COUNT=30
-
-TESTS_TYPE="ignite"
-
-terminate()
-{
-    if [[ "$S3_TESTS_SUCCESS_URL" != */ ]]; then
-        S3_TESTS_SUCCESS_URL=${S3_TESTS_SUCCESS_URL}/
-    fi
-
-    if [[ "$S3_TESTS_FAILURE_URL" != */ ]]; then
-        S3_TESTS_FAILURE_URL=${S3_TESTS_FAILURE_URL}/
-    fi
-
-    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    msg=$host_name
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Test node bootstrap failed"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-        reportFolder=${S3_TESTS_FAILURE_URL}${host_name}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Test node bootstrap successfully completed"
-        echo "[INFO]-----------------------------------------------------"
-        reportFolder=${S3_TESTS_SUCCESS_URL}${host_name}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/bootstrap-result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed drop report folder: $reportFolder"
-    fi
-
-    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
-    fi
-
-    rm -f /opt/bootstrap-result
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-tagInstance()
-{
-    export EC2_HOME=/opt/aws/apitools/ec2
-    export JAVA_HOME=/opt/jdk1.8.0_77
-    export PATH=$JAVA_HOME/bin:$EC2_HOME/bin:$PATH
-
-    INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
-    if [ $? -ne 0 ]; then
-        terminate "Failed to get instance metadata to tag it"
-    fi
-
-    if [ -n "$INSTANCE_NAME_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag Name=${INSTANCE_NAME_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: Name=${INSTANCE_NAME_TAG}"
-        fi
-    fi
-
-    if [ -n "$INSTANCE_OWNER_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag owner=${INSTANCE_OWNER_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: owner=${INSTANCE_OWNER_TAG}"
-        fi
-    fi
-
-    if [ -n "$INSTANCE_PROJECT_TAG" ]; then
-        ec2-create-tags $INSTANCE_ID --tag project=${INSTANCE_PROJECT_TAG} --region $INSTANCE_REGION
-        if [ $code -ne 0 ]; then
-            terminate "Failed to tag EC2 instance with: project=${INSTANCE_PROJECT_TAG}"
-        fi
-    fi
-}
-
-downloadPackage()
-{
-    echo "[INFO] Downloading $3 package from $1 into $2"
-
-    if [[ "$1" == s3* ]]; then
-        aws s3 cp $1 $2
-
-        if [ $? -ne 0 ]; then
-            echo "[WARN] Failed to download $3 package from first attempt"
-            rm -Rf $2
-            sleep 10s
-
-            echo "[INFO] Trying second attempt to download $3 package"
-            aws s3 cp $1 $2
-
-            if [ $? -ne 0 ]; then
-                echo "[WARN] Failed to download $3 package from second attempt"
-                rm -Rf $2
-                sleep 10s
-
-                echo "[INFO] Trying third attempt to download $3 package"
-                aws s3 cp $1 $2
-
-                if [ $? -ne 0 ]; then
-                    terminate "All three attempts to download $3 package from $1 are failed"
-                fi
-            fi
-        fi
-    else
-        curl "$1" -o "$2"
-
-        if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-            echo "[WARN] Failed to download $3 package from first attempt"
-            rm -Rf $2
-            sleep 10s
-
-            echo "[INFO] Trying second attempt to download $3 package"
-            curl "$1" -o "$2"
-
-            if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-                echo "[WARN] Failed to download $3 package from second attempt"
-                rm -Rf $2
-                sleep 10s
-
-                echo "[INFO] Trying third attempt to download $3 package"
-                curl "$1" -o "$2"
-
-                if [ $? -ne 0 ] && [ $? -ne 6 ]; then
-                    terminate "All three attempts to download $3 package from $1 are failed"
-                fi
-            fi
-        fi
-    fi
-
-    echo "[INFO] $3 package successfully downloaded from $1 into $2"
-}
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Bootstrapping Tests node"
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Tests type: $TESTS_TYPE"
-echo "[INFO] Test nodes count: $TEST_NODES_COUNT"
-echo "[INFO] Ignite nodes count: $IGNITE_NODES_COUNT"
-echo "[INFO] Cassandra nodes count: $CASSANDRA_NODES_COUNT"
-echo "[INFO] Tests summary URL: $S3_TESTS_SUMMARY_URL"
-echo "[INFO] Tests first node lock URL: $S3_TESTS_FIRST_NODE_LOCK_URL"
-echo "[INFO] Logs URL: $S3_LOGS_URL"
-echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER_URL"
-echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
-echo "[INFO] Test node discovery URL: $S3_TEST_NODES_DISCOVERY_URL"
-echo "[INFO] Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY_URL"
-echo "[INFO] Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-echo "[INFO] Tests running URL: $S3_TESTS_RUNNING_URL"
-echo "[INFO] Tests waiting URL: $S3_TESTS_WAITING_URL"
-echo "[INFO] Tests success URL: $S3_TESTS_SUCCESS_URL"
-echo "[INFO] Tests failure URL: $S3_TESTS_FAILURE_URL"
-echo "[INFO] Ignite success URL: $S3_IGNITE_SUCCESS_URL"
-echo "[INFO] Ignite failure URL: $S3_IGNITE_FAILURE_URL"
-echo "[INFO] Cassandra success URL: $S3_CASSANDRA_SUCCESS_URL"
-echo "[INFO] Cassandra failure URL: $S3_CASSANDRA_FAILURE_URL"
-echo "[INFO]-----------------------------------------------------------------"
-
-echo "[INFO] Installing 'wget' package"
-yum -y install wget
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'wget' package"
-fi
-
-echo "[INFO] Installing 'net-tools' package"
-yum -y install net-tools
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'net-tools' package"
-fi
-
-echo "[INFO] Installing 'python' package"
-yum -y install python
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'python' package"
-fi
-
-echo "[INFO] Installing 'unzip' package"
-yum -y install unzip
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'unzip' package"
-fi
-
-rm -Rf /opt/jdk1.8.0_77 /opt/jdk-8u77-linux-x64.tar.gz
-
-echo "[INFO] Downloading 'jdk-8u77'"
-wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz" -O /opt/jdk-8u77-linux-x64.tar.gz
-if [ $? -ne 0 ]; then
-    terminate "Failed to download 'jdk-8u77'"
-fi
-
-echo "[INFO] Unzipping 'jdk-8u77'"
-tar -xvzf /opt/jdk-8u77-linux-x64.tar.gz -C /opt
-if [ $? -ne 0 ]; then
-    terminate "Failed to untar 'jdk-8u77'"
-fi
-
-rm -Rf /opt/jdk-8u77-linux-x64.tar.gz
-
-downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
-
-echo "[INFO] Installing 'pip'"
-python /opt/get-pip.py
-if [ $? -ne 0 ]; then
-    terminate "Failed to install 'pip'"
-fi
-
-echo "[INFO] Installing 'awscli'"
-pip install --upgrade awscli
-if [ $? -ne 0 ]; then
-    echo "[ERROR] Failed to install 'awscli' using pip"
-    echo "[INFO] Trying to install awscli using zip archive"
-    echo "[INFO] Downloading awscli zip"
-
-    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
-
-    echo "[INFO] Unzipping awscli zip"
-    unzip /opt/awscli-bundle.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip awscli zip"
-    fi
-
-    rm -fR /opt/awscli-bundle.zip
-
-    echo "[INFO] Installing awscli"
-    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install awscli"
-    fi
-
-    echo "[INFO] Successfully installed awscli from zip archive"
-fi
-
-tagInstance
-
-echo "[INFO] Creating 'ignite' group"
-exists=$(cat /etc/group | grep ignite)
-if [ -z "$exists" ]; then
-    groupadd ignite
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create 'ignite' group"
-    fi
-fi
-
-echo "[INFO] Creating 'ignite' user"
-exists=$(cat /etc/passwd | grep ignite)
-if [ -z "$exists" ]; then
-    useradd -g ignite ignite
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create 'ignite' user"
-    fi
-fi
-
-downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/$TESTS_PACKAGE_ZIP" "Tests"
-
-unzip /opt/$TESTS_PACKAGE_ZIP -d /opt
-if [ $? -ne 0 ]; then
-    terminate "Failed to unzip tests package: $TESTS_PACKAGE_DONLOAD_URL"
-fi
-
-mv /opt/$TESTS_PACKAGE_UNZIP_DIR /opt/ignite-cassandra-tests
-
-if [ ! -f "/opt/ignite-cassandra-tests/cassandra-load-tests.sh" ]; then
-    terminate "There are no cassandra-load-tests.sh in tests package"
-fi
-
-if [ ! -f "/opt/ignite-cassandra-tests/ignite-load-tests.sh" ]; then
-    terminate "There are no ignite-load-tests.sh in tests package"
-fi
-
-if [ ! -f "/opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template.xml" ]; then
-    terminate "There are no ignite-cassandra-client-template.xml in tests package"
-fi
-
-if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh" ]; then
-    terminate "There are no logs-collector.sh in tests package"
-fi
-
-chown -R ignite:ignite /opt/ignite-cassandra-tests
-find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
-
-cp -f /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh /opt
-chown -R ignite:ignite /opt/logs-collector.sh
-
-#profile=/home/ignite/.bash_profile
-profile=/root/.bash_profile
-
-echo "export JAVA_HOME=/opt/jdk1.8.0_77" >> $profile
-echo "export PATH=\$JAVA_HOME/bin:\IGNITE_HOME/bin:\$PATH" >> $profile
-echo "export TESTS_TYPE=$TESTS_TYPE" >> $profile
-echo "export S3_TESTS_SUMMARY_URL=$S3_TESTS_SUMMARY_URL" >> $profile
-echo "export S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_CASSANDRA_NODES_DISCOVERY_URL" >> $profile
-echo "export S3_TEST_NODES_DISCOVERY_URL=$S3_TEST_NODES_DISCOVERY_URL" >> $profile
-echo "export S3_IGNITE_NODES_DISCOVERY_URL=$S3_IGNITE_NODES_DISCOVERY_URL" >> $profile
-echo "export S3_TESTS_RUNNING_URL=$S3_TESTS_RUNNING_URL" >> $profile
-echo "export S3_TESTS_WAITING_URL=$S3_TESTS_WAITING_URL" >> $profile
-echo "export S3_TESTS_SUCCESS_URL=$S3_TESTS_SUCCESS_URL" >> $profile
-echo "export S3_TESTS_FAILURE_URL=$S3_TESTS_FAILURE_URL" >> $profile
-echo "export S3_IGNITE_SUCCESS_URL=$S3_IGNITE_SUCCESS_URL" >> $profile
-echo "export S3_IGNITE_FAILURE_URL=$S3_IGNITE_FAILURE_URL" >> $profile
-echo "export S3_CASSANDRA_SUCCESS_URL=$S3_CASSANDRA_SUCCESS_URL" >> $profile
-echo "export S3_CASSANDRA_FAILURE_URL=$S3_CASSANDRA_FAILURE_URL" >> $profile
-echo "export S3_TESTS_FIRST_NODE_LOCK_URL=$S3_TESTS_FIRST_NODE_LOCK_URL" >> $profile
-echo "export CASSANDRA_NODES_COUNT=$CASSANDRA_NODES_COUNT" >> $profile
-echo "export IGNITE_NODES_COUNT=$IGNITE_NODES_COUNT" >> $profile
-echo "export TEST_NODES_COUNT=$TEST_NODES_COUNT" >> $profile
-echo "export S3_LOGS_TRIGGER_URL=$S3_LOGS_TRIGGER_URL" >> $profile
-
-HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-/opt/logs-collector.sh "/opt/ignite-cassandra-tests/logs" "$S3_LOGS_URL/$HOST_NAME" "$S3_LOGS_TRIGGER_URL" > /opt/ignite-cassandra-tests/logs-collector.log &
-
-cmd="/opt/ignite-cassandra-tests/bootstrap/aws/tests/tests-run.sh"
-
-#sudo -u ignite -g ignite sh -c "$cmd | tee /opt/ignite-cassandra-tests/start.log"
-
-$cmd | tee /opt/ignite-cassandra-tests/start.log
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/tests/tests-run.sh b/modules/cassandra/src/test/bootstrap/aws/tests/tests-run.sh
deleted file mode 100644
index 74a769a..0000000
--- a/modules/cassandra/src/test/bootstrap/aws/tests/tests-run.sh
+++ /dev/null
@@ -1,715 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#profile=/home/ignite/.bash_profile
-profile=/root/.bash_profile
-
-. $profile
-
-terminate()
-{
-    if [[ "$S3_TESTS_SUCCESS_URL" != */ ]]; then
-        S3_TESTS_SUCCESS_URL=${S3_TESTS_SUCCESS_URL}/
-    fi
-
-    if [[ "$S3_TESTS_FAILURE_URL" != */ ]]; then
-        S3_TESTS_FAILURE_URL=${S3_TESTS_FAILURE_URL}/
-    fi
-
-    msg=$HOST_NAME
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Tests execution failed"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-        reportFolder=${S3_TESTS_FAILURE_URL}${HOST_NAME}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Tests execution successfully completed"
-        echo "[INFO]-----------------------------------------------------"
-        reportFolder=${S3_TESTS_SUCCESS_URL}${HOST_NAME}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/ignite-cassandra-tests/tests-result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed drop report folder: $reportFolder"
-    fi
-
-    if [ -d "/opt/ignite-cassandra-tests/logs" ]; then
-        aws s3 sync --sse AES256 /opt/ignite-cassandra-tests/logs $reportFolder
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to export tests logs to: $reportFolder"
-        fi
-    fi
-
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/tests-result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to report tests results to: $reportFile"
-    fi
-
-    aws s3 rm ${S3_TESTS_RUNNING_URL}${HOST_NAME}
-    aws s3 rm ${S3_TESTS_WAITING_URL}${HOST_NAME}
-
-    if [ "$FIRST_NODE" == "true" ]; then
-        waitAllTestNodesCompleted
-        removeFirstNodeLock
-        reportScript=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/tests-report.sh)
-        $reportScript
-
-        if [ -n "$S3_LOGS_TRIGGER_URL" ]; then
-            aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/hostname $S3_LOGS_TRIGGER_URL
-            if [ $? -ne 0 ]; then
-                echo "[ERROR] Failed to trigger logs collection"
-            fi
-        fi
-    fi
-
-    rm -Rf /opt/ignite-cassandra-tests/tests-result /opt/ignite-cassandra-tests/hostname
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-cleanupMetadata()
-{
-    echo "[INFO] Running cleanup"
-    aws s3 rm $S3_TESTS_SUMMARY_URL
-    aws s3 rm --recursive $S3_TEST_NODES_DISCOVERY_URL
-    aws s3 rm --recursive $S3_TESTS_RUNNING_URL
-    aws s3 rm --recursive $S3_TESTS_WAITING_URL
-    aws s3 rm --recursive $S3_TESTS_SUCCESS_URL
-    aws s3 rm --recursive $S3_TESTS_FAILURE_URL
-    echo "[INFO] Cleanup completed"
-}
-
-registerTestNode()
-{
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/hostname ${S3_TEST_NODES_DISCOVERY_URL}${HOST_NAME}
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create test node registration flag: ${S3_TEST_NODES_DISCOVERY_URL}${HOST_NAME}"
-    fi
-}
-
-createRunningFlag()
-{
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/hostname ${S3_TESTS_RUNNING_URL}${HOST_NAME}
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create tests running flag: ${S3_TESTS_RUNNING_URL}${HOST_NAME}"
-    fi
-}
-
-dropRunningFlag()
-{
-    exists=$(aws s3 ls ${S3_TESTS_RUNNING_URL}${HOST_NAME})
-    if [ -z "$exists" ]; then
-        return 0
-    fi
-
-    aws s3 rm ${S3_TESTS_RUNNING_URL}${HOST_NAME}
-    if [ $? -ne 0 ]; then
-        terminate "Failed to drop tests running flag: ${S3_TESTS_RUNNING_URL}${HOST_NAME}"
-    fi
-}
-
-createWaitingFlag()
-{
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/hostname ${S3_TESTS_WAITING_URL}${HOST_NAME}
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create tests waiting flag: ${S3_TESTS_WAITING_URL}${HOST_NAME}"
-    fi
-}
-
-dropWaitingFlag()
-{
-    exists=$(aws s3 ls ${S3_TESTS_WAITING_URL}${HOST_NAME})
-    if [ -z "$exists" ]; then
-        return 0
-    fi
-
-    aws s3 rm ${S3_TESTS_WAITING_URL}${HOST_NAME}
-    if [ $? -ne 0 ]; then
-        terminate "Failed to drop tests waiting flag: ${S3_TESTS_WAITING_URL}${HOST_NAME}"
-    fi
-}
-
-dropTestsSummary()
-{
-    exists=$(aws s3 ls $S3_TESTS_SUMMARY_URL)
-    if [ -z "$exists" ]; then
-        return 0
-    fi
-
-    aws s3 rm $S3_TESTS_SUMMARY_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to drop tests summary info: $S3_TESTS_SUMMARY_URL"
-    fi
-}
-
-validate()
-{
-    if [ -z "$TESTS_TYPE" ]; then
-        terminate "Tests type 'ignite' or 'cassandra' should be specified"
-    fi
-
-    if [ "$TESTS_TYPE" != "ignite" ] && [ "$TESTS_TYPE" != "cassandra" ]; then
-        terminate "Incorrect tests type specified: $TESTS_TYPE"
-    fi
-
-    if [ -z "$S3_TESTS_SUCCESS_URL" ]; then
-        terminate "Tests success URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_SUCCESS_URL" != */ ]]; then
-        S3_TESTS_SUCCESS_URL=${S3_TESTS_SUCCESS_URL}/
-    fi
-
-    if [ -z "$S3_TESTS_FAILURE_URL" ]; then
-        terminate "Tests failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_FAILURE_URL" != */ ]]; then
-        S3_TESTS_FAILURE_URL=${S3_TESTS_FAILURE_URL}/
-    fi
-
-    if [ -z "$S3_TESTS_RUNNING_URL" ]; then
-        terminate "Tests running URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_RUNNING_URL" != */ ]]; then
-        S3_TESTS_RUNNING_URL=${S3_TESTS_RUNNING_URL}/
-    fi
-
-    if [ -z "$S3_TESTS_WAITING_URL" ]; then
-        terminate "Tests waiting URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_WAITING_URL" != */ ]]; then
-        S3_TESTS_WAITING_URL=${S3_TESTS_WAITING_URL}/
-    fi
-
-    if [ -z "$S3_IGNITE_SUCCESS_URL" ]; then
-        terminate "Ignite success URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_SUCCESS_URL" != */ ]]; then
-        S3_IGNITE_SUCCESS_URL=${S3_IGNITE_SUCCESS_URL}/
-    fi
-
-    if [ -z "$S3_IGNITE_FAILURE_URL" ]; then
-        terminate "Ignite failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_FAILURE_URL" != */ ]]; then
-        S3_IGNITE_FAILURE_URL=${S3_IGNITE_FAILURE_URL}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_SUCCESS_URL" ]; then
-        terminate "Cassandra success URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_SUCCESS_URL" != */ ]]; then
-        S3_CASSANDRA_SUCCESS_URL=${S3_CASSANDRA_SUCCESS_URL}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_FAILURE_URL" ]; then
-        terminate "Cassandra failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_FAILURE_URL" != */ ]]; then
-        S3_CASSANDRA_FAILURE_URL=${S3_CASSANDRA_FAILURE_URL}/
-    fi
-
-    if [ -z "$S3_TEST_NODES_DISCOVERY_URL" ]; then
-        terminate "Tests S3 discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_TEST_NODES_DISCOVERY_URL" != */ ]]; then
-        S3_TEST_NODES_DISCOVERY_URL=${S3_TEST_NODES_DISCOVERY_URL}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_NODES_DISCOVERY_URL" ]; then
-        terminate "Cassandra S3 discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
-        S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
-    fi
-
-    if [ -z "$S3_IGNITE_NODES_DISCOVERY_URL" ]; then
-        terminate "Ignite S3 discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
-        S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
-    fi
-
-    if [ -z "$S3_IGNITE_NODES_DISCOVERY_URL" ]; then
-        terminate "Ignite S3 discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_NODES_DISCOVERY_URL" != */ ]]; then
-        S3_IGNITE_NODES_DISCOVERY_URL=${S3_IGNITE_NODES_DISCOVERY_URL}/
-    fi
-}
-
-setupCassandraSeeds()
-{
-    if [ $CASSANDRA_NODES_COUNT -eq 0 ]; then
-        return 0
-    fi
-
-    CASSANDRA_SEEDS1=
-    CASSANDRA_SEEDS2=
-
-    echo "[INFO] Setting up Cassandra seeds"
-
-    echo "[INFO] Looking for Cassandra seeds in: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-
-    startTime=$(date +%s)
-
-    while true; do
-        seeds=$(aws s3 ls $S3_CASSANDRA_NODES_DISCOVERY_URL | grep -v PRE | sed -r "s/^.* //g")
-        if [ -n "$seeds" ]; then
-            seeds=($seeds)
-            length=${#seeds[@]}
-
-            if [ $length -lt 4 ]; then
-                seed1=${seeds[0]}
-                seed2=${seeds[1]}
-                seed3=${seeds[2]}
-            else
-                pos1=$(($RANDOM%$length))
-                pos2=$(($RANDOM%$length))
-                pos3=$(($RANDOM%$length))
-                seed1=${seeds[${pos1}]}
-                seed2=${seeds[${pos2}]}
-                seed3=${seeds[${pos3}]}
-            fi
-
-            CASSANDRA_SEEDS1="<value>$seed1<\/value>"
-            CASSANDRA_SEEDS2="$seed1"
-
-            if [ "$seed2" != "$seed1" ] && [ -n "$seed2" ]; then
-                CASSANDRA_SEEDS1="$CASSANDRA_SEEDS1<value>$seed2<\/value>"
-                CASSANDRA_SEEDS2="${CASSANDRA_SEEDS2},$seed2"
-            fi
-
-            if [ "$seed3" != "$seed2" ] && [ "$seed3" != "$seed1" ] && [ -n "$seed3" ]; then
-                CASSANDRA_SEEDS1="$CASSANDRA_SEEDS1<value>$seed3<\/value>"
-                CASSANDRA_SEEDS2="${CASSANDRA_SEEDS2},$seed3"
-            fi
-
-            echo "[INFO] Using Cassandra seeds: $CASSANDRA_SEEDS2"
-
-            echo "contact.points=$CASSANDRA_SEEDS2" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/connection.properties
-
-            cat /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template.xml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS1/g" > /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml
-
-            return 0
-        fi
-
-        currentTime=$(date +%s)
-        duration=$(( $currentTime-$startTime ))
-        duration=$(( $duration/60 ))
-
-        if [ $duration -gt $NODE_STARTUP_TIME ]; then
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but no Cassandra nodes is up and running"
-        fi
-
-        echo "[INFO] Waiting for the first Cassandra node to start and publish its seed, time passed ${duration}min"
-
-        sleep 1m
-    done
-}
-
-setupIgniteSeeds()
-{
-    if [ $IGNITE_NODES_COUNT -eq 0 ]; then
-        return 0
-    fi
-
-    echo "[INFO] Setting up Ignite seeds"
-
-    echo "[INFO] Looking for Ignite seeds in: $S3_IGNITE_NODES_DISCOVERY_URL"
-
-    startTime=$(date +%s)
-
-    while true; do
-        seeds=$(aws s3 ls $S3_IGNITE_NODES_DISCOVERY_URL | grep -v PRE | sed -r "s/^.* //g")
-        if [ -n "$seeds" ]; then
-            seeds=($seeds)
-            length=${#seeds[@]}
-
-            if [ $length -lt 4 ]; then
-                seed1=${seeds[0]}
-                seed2=${seeds[1]}
-                seed3=${seeds[2]}
-            else
-                pos1=$(($RANDOM%$length))
-                pos2=$(($RANDOM%$length))
-                pos3=$(($RANDOM%$length))
-                seed1=${seeds[${pos1}]}
-                seed2=${seeds[${pos2}]}
-                seed3=${seeds[${pos3}]}
-            fi
-
-            IGNITE_SEEDS="<value>$seed1<\/value>"
-
-            if [ "$seed2" != "$seed1" ] && [ -n "$seed2" ]; then
-                IGNITE_SEEDS="$IGNITE_SEEDS<value>$seed2<\/value>"
-            fi
-
-            if [ "$seed3" != "$seed2" ] && [ "$seed3" != "$seed1" ] && [ -n "$seed3" ]; then
-                IGNITE_SEEDS="$IGNITE_SEEDS<value>$seed3<\/value>"
-            fi
-
-            echo "[INFO] Using Ignite seeds: $IGNITE_SEEDS"
-
-            cat /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml | sed -r "s/\\\$\{IGNITE_SEEDS\}/$IGNITE_SEEDS/g" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
-            rm -f /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml
-
-            return 0
-        fi
-
-        currentTime=$(date +%s)
-        duration=$(( $currentTime-$startTime ))
-        duration=$(( $duration/60 ))
-
-        if [ $duration -gt $NODE_STARTUP_TIME ]; then
-            terminate "${NODE_STARTUP_TIME}min timeout expired, but no Ignite nodes is up and running"
-        fi
-
-        echo "[INFO] Waiting for the first Ignite node to start and publish its seed, time passed ${duration}min"
-
-        sleep 1m
-    done
-}
-
-tryToGetFirstNodeLock()
-{
-    echo "[INFO] Trying to get first node lock"
-
-    checkFirstNodeLockExist
-    if [ $? -ne 0 ]; then
-        return 1
-    fi
-
-    createFirstNodeLock
-
-    sleep 5s
-
-    rm -Rf /opt/ignite-cassandra-tests/first-node-lock
-
-    aws s3 cp $S3_TESTS_FIRST_NODE_LOCK_URL /opt/ignite-cassandra-tests/first-node-lock
-    if [ $? -ne 0 ]; then
-        echo "[WARN] Failed to check just created first node lock"
-        return 1
-    fi
-
-    first_host=$(cat /opt/ignite-cassandra-tests/first-node-lock)
-
-    rm -f /opt/ignite-cassandra-tests/first-node-lock
-
-    if [ "$first_host" != "$HOST_NAME" ]; then
-        echo "[INFO] Node $first_host has discarded previously created first node lock"
-        return 1
-    fi
-
-    echo "[INFO] Congratulations, got first node lock"
-
-    return 0
-}
-
-checkFirstNodeLockExist()
-{
-    echo "[INFO] Checking for the first node lock"
-
-    lockExists=$(aws s3 ls $S3_TESTS_FIRST_NODE_LOCK_URL)
-    if [ -n "$lockExists" ]; then
-        echo "[INFO] First node lock already exists"
-        return 1
-    fi
-
-    echo "[INFO] First node lock doesn't exist yet"
-
-    return 0
-}
-
-createFirstNodeLock()
-{
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/hostname $S3_TESTS_FIRST_NODE_LOCK_URL
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create first node lock"
-    fi
-    echo "[INFO] Created first node lock"
-}
-
-removeFirstNodeLock()
-{
-    exists=$(aws s3 ls $S3_TESTS_FIRST_NODE_LOCK_URL)
-    if [ -z "$exists" ]; then
-        return 0
-    fi
-
-    aws s3 rm $S3_TESTS_FIRST_NODE_LOCK_URL
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to remove first node lock"
-        return 1
-    fi
-
-    echo "[INFO] Removed first node lock"
-}
-
-waitAllIgniteNodesReady()
-{
-    if [ $IGNITE_NODES_COUNT -eq 0 ]; then
-        return 0
-    fi
-
-    echo "[INFO] Waiting for all $IGNITE_NODES_COUNT Ignite nodes up and running"
-
-    while true; do
-        successCount=$(aws s3 ls $S3_IGNITE_SUCCESS_URL | wc -l)
-        failureCount=$(aws s3 ls $S3_IGNITE_FAILURE_URL | wc -l)
-
-        if [ $successCount -ge $IGNITE_NODES_COUNT ]; then
-            break
-        fi
-
-        if [ "$failureCount" != "0" ]; then
-            terminate "$failureCount Ignite nodes are failed to start. Thus it doesn't make sense to run tests."
-        fi
-
-        echo "[INFO] Waiting extra 1min"
-
-        sleep 1m
-    done
-
-    echo "[INFO] Congratulation, all $IGNITE_NODES_COUNT Ignite nodes are up and running"
-}
-
-waitAllCassandraNodesReady()
-{
-    if [ $CASSANDRA_NODES_COUNT -eq 0 ]; then
-        return 0
-    fi
-
-    echo "[INFO] Waiting for all $CASSANDRA_NODES_COUNT Cassandra nodes up and running"
-
-    while true; do
-        successCount=$(aws s3 ls $S3_CASSANDRA_SUCCESS_URL | wc -l)
-        failureCount=$(aws s3 ls $S3_CASSANDRA_FAILURE_URL | wc -l)
-
-        if [ $successCount -ge $CASSANDRA_NODES_COUNT ]; then
-            break
-        fi
-
-        if [ "$failureCount" != "0" ]; then
-            terminate "$failureCount Cassandra nodes are failed to start. Thus it doesn't make sense to run tests."
-        fi
-
-        echo "[INFO] Waiting extra 1min"
-
-        sleep 1m
-    done
-
-    echo "[INFO] Congratulation, all $CASSANDRA_NODES_COUNT Cassandra nodes are up and running"
-}
-
-waitFirstTestNodeRegistered()
-{
-    echo "[INFO] Waiting for the first test node to register"
-
-    while true; do
-        first_host=
-
-        exists=$(aws s3 ls $S3_TESTS_FIRST_NODE_LOCK_URL)
-        if [ -n "$exists" ]; then
-            rm -Rf /opt/ignite-cassandra-tests/first-node-lock
-
-            aws s3 cp $S3_TESTS_FIRST_NODE_LOCK_URL /opt/ignite-cassandra-tests/first-node-lock
-            if [ $? -ne 0 ]; then
-                terminate "Failed to check existing first node lock"
-            fi
-
-            first_host=$(cat /opt/ignite-cassandra-tests/first-node-lock)
-
-            rm -Rf /opt/ignite-cassandra-tests/first-node-lock
-        fi
-
-        if [ -n "$first_host" ]; then
-            exists=$(aws s3 ls ${S3_TEST_NODES_DISCOVERY_URL}${first_host})
-            if [ -n "$exists" ]; then
-                break
-            fi
-        fi
-
-        echo "[INFO] Waiting extra 1min"
-
-        sleep 1m
-    done
-
-    echo "[INFO] First test node registered"
-}
-
-waitAllTestNodesReady()
-{
-    createWaitingFlag
-
-    echo "[INFO] Waiting for all $TEST_NODES_COUNT test nodes up and running"
-
-    while true; do
-
-        nodesCount=$(aws s3 ls $S3_TEST_NODES_DISCOVERY_URL | wc -l)
-
-        if [ $nodesCount -ge $TEST_NODES_COUNT ]; then
-            break
-        fi
-
-        echo "[INFO] Waiting extra 1min"
-
-        sleep 1m
-    done
-
-    echo "[INFO] Congratulation, all $TEST_NODES_COUNT test nodes are up and running"
-
-    dropWaitingFlag
-    createRunningFlag
-}
-
-waitAllTestNodesCompleted()
-{
-    echo "[INFO] Waiting for all $TEST_NODES_COUNT test nodes to complete their tests"
-
-    while true; do
-        successCount=$(aws s3 ls $S3_TESTS_SUCCESS_URL | wc -l)
-        failureCount=$(aws s3 ls $S3_TESTS_FAILURE_URL | wc -l)
-        count=$(( $successCount+$failureCount ))
-
-        if [ $count -ge $TEST_NODES_COUNT ]; then
-            break
-        fi
-
-        echo "[INFO] Waiting extra 1min"
-
-        sleep 1m
-    done
-
-    echo "[INFO] Congratulation, all $TEST_NODES_COUNT test nodes have completed their tests"
-}
-
-# Time (in minutes) to wait for Ignite/Cassandra node up and running and register it in S3
-NODE_STARTUP_TIME=10
-
-HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-echo $HOST_NAME > /opt/ignite-cassandra-tests/hostname
-
-validate
-
-FIRST_NODE="false"
-
-tryToGetFirstNodeLock
-
-if [ $? -eq 0 ]; then
-    FIRST_NODE="true"
-fi
-
-dropRunningFlag
-dropWaitingFlag
-
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ "$FIRST_NODE" == "true" ]; then
-    echo "[INFO] Running tests from first node"
-    dropTestsSummary
-else
-    echo "[INFO] Running tests"
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Tests type: $TESTS_TYPE"
-echo "[INFO] Test nodes count: $TEST_NODES_COUNT"
-echo "[INFO] Ignite nodes count: $IGNITE_NODES_COUNT"
-echo "[INFO] Cassandra nodes count: $CASSANDRA_NODES_COUNT"
-echo "[INFO] Tests summary URL: $S3_TESTS_SUMMARY_URL"
-echo "[INFO] Tests first node lock URL: $S3_TESTS_FIRST_NODE_LOCK_URL"
-echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
-echo "[INFO] Test node discovery URL: $S3_TEST_NODES_DISCOVERY_URL"
-echo "[INFO] Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY_URL"
-echo "[INFO] Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL"
-echo "[INFO] Tests running URL: $S3_TESTS_RUNNING_URL"
-echo "[INFO] Tests waiting URL: $S3_TESTS_WAITING_URL"
-echo "[INFO] Tests success URL: $S3_TESTS_SUCCESS_URL"
-echo "[INFO] Tests failure URL: $S3_TESTS_FAILURE_URL"
-echo "[INFO] Ignite success URL: $S3_IGNITE_SUCCESS_URL"
-echo "[INFO] Ignite failure URL: $S3_IGNITE_FAILURE_URL"
-echo "[INFO] Cassandra success URL: $S3_CASSANDRA_SUCCESS_URL"
-echo "[INFO] Cassandra failure URL: $S3_CASSANDRA_FAILURE_URL"
-echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER_URL"
-echo "[INFO] JAVA_HOME: $JAVA_HOME"
-echo "[INFO] PATH: $PATH"
-echo "[INFO]-----------------------------------------------------------------"
-
-echo "admin.user=cassandra" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
-echo "admin.password=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
-echo "regular.user=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
-echo "regular.password=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
-
-waitAllCassandraNodesReady
-waitAllIgniteNodesReady
-
-setupCassandraSeeds
-setupIgniteSeeds
-
-if [ "$FIRST_NODE" != "true" ]; then
-    waitFirstTestNodeRegistered
-else
-    cleanupMetadata
-fi
-
-registerTestNode
-
-waitAllTestNodesReady
-
-cd /opt/ignite-cassandra-tests
-
-if [ "$TESTS_TYPE" == "ignite" ]; then
-    echo "[INFO] Running Ignite load tests"
-    ./ignite-load-tests.sh
-    result=$?
-else
-    echo "[INFO] Running Cassandra load tests"
-    ./cassandra-load-tests.sh
-    result=$?
-fi
-
-if [ $result -ne 0 ]; then
-    terminate ""
-fi
-
-terminate
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java b/modules/cassandra/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java
deleted file mode 100644
index 26cca68..0000000
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.util.Collection;
-import java.util.Map;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.tests.pojos.Person;
-import org.apache.ignite.tests.pojos.PersonId;
-import org.apache.ignite.tests.utils.CacheStoreHelper;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.ignite.tests.utils.TestsHelper;
-import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.springframework.core.io.ClassPathResource;
-
-/**
- * Unit tests for {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore} implementation of
- * {@link org.apache.ignite.cache.store.CacheStore} which allows to store Ignite cache data into Cassandra tables.
- */
-public class CassandraDirectPersistenceTest {
-    /** */
-    private static final Logger LOGGER = Logger.getLogger(CassandraDirectPersistenceTest.class.getName());
-
-    /** */
-    @BeforeClass
-    public static void setUpClass() {
-        if (CassandraHelper.useEmbeddedCassandra()) {
-            try {
-                CassandraHelper.startEmbeddedCassandra(LOGGER);
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to start embedded Cassandra instance", e);
-            }
-        }
-
-        LOGGER.info("Testing admin connection to Cassandra");
-        CassandraHelper.testAdminConnection();
-
-        LOGGER.info("Testing regular connection to Cassandra");
-        CassandraHelper.testRegularConnection();
-
-        LOGGER.info("Dropping all artifacts from previous tests execution session");
-        CassandraHelper.dropTestKeyspaces();
-
-        LOGGER.info("Start tests execution");
-    }
-
-    /** */
-    @AfterClass
-    public static void tearDownClass() {
-        try {
-            CassandraHelper.dropTestKeyspaces();
-        }
-        finally {
-            CassandraHelper.releaseCassandraResources();
-
-            if (CassandraHelper.useEmbeddedCassandra()) {
-                try {
-                    CassandraHelper.stopEmbeddedCassandra();
-                }
-                catch (Throwable e) {
-                    LOGGER.error("Failed to stop embedded Cassandra instance", e);
-                }
-            }
-        }
-    }
-
-    /** */
-    @Test
-    @SuppressWarnings("unchecked")
-    public void primitiveStrategyTest() {
-        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store2 = CacheStoreHelper.createCacheStore("stringTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
-        Collection<CacheEntryImpl<String, String>> strEntries = TestsHelper.generateStringsEntries();
-
-        Collection<Long> fakeLongKeys = TestsHelper.getKeys(longEntries);
-        fakeLongKeys.add(-1L);
-        fakeLongKeys.add(-2L);
-        fakeLongKeys.add(-3L);
-        fakeLongKeys.add(-4L);
-
-        Collection<String> fakeStrKeys = TestsHelper.getKeys(strEntries);
-        fakeStrKeys.add("-1");
-        fakeStrKeys.add("-2");
-        fakeStrKeys.add("-3");
-        fakeStrKeys.add("-4");
-
-        LOGGER.info("Running PRIMITIVE strategy write tests");
-
-        LOGGER.info("Running single operation write tests");
-        store1.write(longEntries.iterator().next());
-        store2.write(strEntries.iterator().next());
-        LOGGER.info("Single operation write tests passed");
-
-        LOGGER.info("Running bulk operation write tests");
-        store1.writeAll(longEntries);
-        store2.writeAll(strEntries);
-        LOGGER.info("Bulk operation write tests passed");
-
-        LOGGER.info("PRIMITIVE strategy write tests passed");
-
-        LOGGER.info("Running PRIMITIVE strategy read tests");
-
-        LOGGER.info("Running single operation read tests");
-
-        LOGGER.info("Running real keys read tests");
-
-        Long longVal = (Long)store1.load(longEntries.iterator().next().getKey());
-        if (!longEntries.iterator().next().getValue().equals(longVal))
-            throw new RuntimeException("Long values was incorrectly deserialized from Cassandra");
-
-        String strVal = (String)store2.load(strEntries.iterator().next().getKey());
-        if (!strEntries.iterator().next().getValue().equals(strVal))
-            throw new RuntimeException("String values was incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Running fake keys read tests");
-
-        longVal = (Long)store1.load(-1L);
-        if (longVal != null)
-            throw new RuntimeException("Long value with fake key '-1' was found in Cassandra");
-
-        strVal = (String)store2.load("-1");
-        if (strVal != null)
-            throw new RuntimeException("String value with fake key '-1' was found in Cassandra");
-
-        LOGGER.info("Single operation read tests passed");
-
-        LOGGER.info("Running bulk operation read tests");
-
-        LOGGER.info("Running real keys read tests");
-
-        Map longValues = store1.loadAll(TestsHelper.getKeys(longEntries));
-        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
-            throw new RuntimeException("Long values was incorrectly deserialized from Cassandra");
-
-        Map strValues = store2.loadAll(TestsHelper.getKeys(strEntries));
-        if (!TestsHelper.checkCollectionsEqual(strValues, strEntries))
-            throw new RuntimeException("String values was incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Running fake keys read tests");
-
-        longValues = store1.loadAll(fakeLongKeys);
-        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
-            throw new RuntimeException("Long values was incorrectly deserialized from Cassandra");
-
-        strValues = store2.loadAll(fakeStrKeys);
-        if (!TestsHelper.checkCollectionsEqual(strValues, strEntries))
-            throw new RuntimeException("String values was incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Bulk operation read tests passed");
-
-        LOGGER.info("PRIMITIVE strategy read tests passed");
-
-        LOGGER.info("Running PRIMITIVE strategy delete tests");
-
-        LOGGER.info("Deleting real keys");
-
-        store1.delete(longEntries.iterator().next().getKey());
-        store1.deleteAll(TestsHelper.getKeys(longEntries));
-
-        store2.delete(strEntries.iterator().next().getKey());
-        store2.deleteAll(TestsHelper.getKeys(strEntries));
-
-        LOGGER.info("Deleting fake keys");
-
-        store1.delete(-1L);
-        store2.delete("-1");
-
-        store1.deleteAll(fakeLongKeys);
-        store2.deleteAll(fakeStrKeys);
-
-        LOGGER.info("PRIMITIVE strategy delete tests passed");
-    }
-
-    /** */
-    @Test
-    @SuppressWarnings("unchecked")
-    public void blobStrategyTest() {
-        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store2 = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store3 = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
-        Collection<CacheEntryImpl<Long, Person>> personEntries = TestsHelper.generateLongsPersonsEntries();
-
-        LOGGER.info("Running BLOB strategy write tests");
-
-        LOGGER.info("Running single operation write tests");
-        store1.write(longEntries.iterator().next());
-        store2.write(personEntries.iterator().next());
-        store3.write(personEntries.iterator().next());
-        LOGGER.info("Single operation write tests passed");
-
-        LOGGER.info("Running bulk operation write tests");
-        store1.writeAll(longEntries);
-        store2.writeAll(personEntries);
-        store3.writeAll(personEntries);
-        LOGGER.info("Bulk operation write tests passed");
-
-        LOGGER.info("BLOB strategy write tests passed");
-
-        LOGGER.info("Running BLOB strategy read tests");
-
-        LOGGER.info("Running single operation read tests");
-
-        Long longVal = (Long)store1.load(longEntries.iterator().next().getKey());
-        if (!longEntries.iterator().next().getValue().equals(longVal))
-            throw new RuntimeException("Long values was incorrectly deserialized from Cassandra");
-
-        Person personVal = (Person)store2.load(personEntries.iterator().next().getKey());
-        if (!personEntries.iterator().next().getValue().equals(personVal))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        personVal = (Person)store3.load(personEntries.iterator().next().getKey());
-        if (!personEntries.iterator().next().getValue().equals(personVal))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Single operation read tests passed");
-
-        LOGGER.info("Running bulk operation read tests");
-
-        Map longValues = store1.loadAll(TestsHelper.getKeys(longEntries));
-        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
-            throw new RuntimeException("Long values was incorrectly deserialized from Cassandra");
-
-        Map personValues = store2.loadAll(TestsHelper.getKeys(personEntries));
-        if (!TestsHelper.checkPersonCollectionsEqual(personValues, personEntries, false))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        personValues = store3.loadAll(TestsHelper.getKeys(personEntries));
-        if (!TestsHelper.checkPersonCollectionsEqual(personValues, personEntries, false))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Bulk operation read tests passed");
-
-        LOGGER.info("BLOB strategy read tests passed");
-
-        LOGGER.info("Running BLOB strategy delete tests");
-
-        store1.delete(longEntries.iterator().next().getKey());
-        store1.deleteAll(TestsHelper.getKeys(longEntries));
-
-        store2.delete(personEntries.iterator().next().getKey());
-        store2.deleteAll(TestsHelper.getKeys(personEntries));
-
-        store3.delete(personEntries.iterator().next().getKey());
-        store3.deleteAll(TestsHelper.getKeys(personEntries));
-
-        LOGGER.info("BLOB strategy delete tests passed");
-    }
-
-    /** */
-    @Test
-    @SuppressWarnings("unchecked")
-    public void pojoStrategyTest() {
-        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store2 = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store3 = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<Long, Person>> entries1 = TestsHelper.generateLongsPersonsEntries();
-        Collection<CacheEntryImpl<PersonId, Person>> entries2 = TestsHelper.generatePersonIdsPersonsEntries();
-        Collection<CacheEntryImpl<PersonId, Person>> entries3 = TestsHelper.generatePersonIdsPersonsEntries();
-
-        LOGGER.info("Running POJO strategy write tests");
-
-        LOGGER.info("Running single operation write tests");
-        store1.write(entries1.iterator().next());
-        store2.write(entries2.iterator().next());
-        store3.write(entries3.iterator().next());
-        LOGGER.info("Single operation write tests passed");
-
-        LOGGER.info("Running bulk operation write tests");
-        store1.writeAll(entries1);
-        store2.writeAll(entries2);
-        store3.writeAll(entries3);
-        LOGGER.info("Bulk operation write tests passed");
-
-        LOGGER.info("POJO strategy write tests passed");
-
-        LOGGER.info("Running POJO strategy read tests");
-
-        LOGGER.info("Running single operation read tests");
-
-        Person person = (Person)store1.load(entries1.iterator().next().getKey());
-        if (!entries1.iterator().next().getValue().equalsPrimitiveFields(person))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        person = (Person)store2.load(entries2.iterator().next().getKey());
-        if (!entries2.iterator().next().getValue().equalsPrimitiveFields(person))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        person = (Person)store3.load(entries3.iterator().next().getKey());
-        if (!entries3.iterator().next().getValue().equals(person))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Single operation read tests passed");
-
-        LOGGER.info("Running bulk operation read tests");
-
-        Map persons = store1.loadAll(TestsHelper.getKeys(entries1));
-        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries1, true))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        persons = store2.loadAll(TestsHelper.getKeys(entries2));
-        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries2, true))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        persons = store3.loadAll(TestsHelper.getKeys(entries3));
-        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries3, false))
-            throw new RuntimeException("Person values was incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Bulk operation read tests passed");
-
-        LOGGER.info("POJO strategy read tests passed");
-
-        LOGGER.info("Running POJO strategy delete tests");
-
-        store1.delete(entries1.iterator().next().getKey());
-        store1.deleteAll(TestsHelper.getKeys(entries1));
-
-        store2.delete(entries2.iterator().next().getKey());
-        store2.deleteAll(TestsHelper.getKeys(entries2));
-
-        store3.delete(entries3.iterator().next().getKey());
-        store3.deleteAll(TestsHelper.getKeys(entries3));
-
-        LOGGER.info("POJO strategy delete tests passed");
-    }
-}
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java b/modules/cassandra/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java
deleted file mode 100644
index 5de3097..0000000
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.net.URL;
-import org.apache.ignite.cache.store.cassandra.utils.DDLGenerator;
-import org.junit.Test;
-
-/**
- * DDLGenerator test.
- */
-public class DDLGeneratorTest {
-    @Test
-    @SuppressWarnings("unchecked")
-    /** */
-    public void generatorTest() {
-        ClassLoader clsLdr = DDLGeneratorTest.class.getClassLoader();
-
-        URL url1 = clsLdr.getResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml");
-        String file1 = url1.getFile(); // TODO IGNITE-1371 Possible NPE
-
-        URL url2 = clsLdr.getResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml");
-        String file2 = url2.getFile();  // TODO IGNITE-1371 Possible NPE
-
-        DDLGenerator.main(new String[]{file1, file2});
-    }
-
-}
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java b/modules/cassandra/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java
deleted file mode 100644
index 5da6ba2..0000000
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.util.Collection;
-import java.util.Map;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.CachePeekMode;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.tests.pojos.Person;
-import org.apache.ignite.tests.pojos.PersonId;
-import org.apache.ignite.tests.utils.CacheStoreHelper;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.ignite.tests.utils.TestsHelper;
-import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.springframework.core.io.ClassPathResource;
-
-/**
- * Unit tests for Ignite caches which utilizing {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore}
- * to store cache data into Cassandra tables
- */
-public class IgnitePersistentStoreTest {
-    /** */
-    private static final Logger LOGGER = Logger.getLogger(IgnitePersistentStoreTest.class.getName());
-
-    /** */
-    @BeforeClass
-    public static void setUpClass() {
-        if (CassandraHelper.useEmbeddedCassandra()) {
-            try {
-                CassandraHelper.startEmbeddedCassandra(LOGGER);
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to start embedded Cassandra instance", e);
-            }
-        }
-
-        LOGGER.info("Testing admin connection to Cassandra");
-        CassandraHelper.testAdminConnection();
-
-        LOGGER.info("Testing regular connection to Cassandra");
-        CassandraHelper.testRegularConnection();
-
-        LOGGER.info("Dropping all artifacts from previous tests execution session");
-        CassandraHelper.dropTestKeyspaces();
-
-        LOGGER.info("Start tests execution");
-    }
-
-    /** */
-    @AfterClass
-    public static void tearDownClass() {
-        try {
-            CassandraHelper.dropTestKeyspaces();
-        }
-        finally {
-            CassandraHelper.releaseCassandraResources();
-
-            if (CassandraHelper.useEmbeddedCassandra()) {
-                try {
-                    CassandraHelper.stopEmbeddedCassandra();
-                }
-                catch (Throwable e) {
-                    LOGGER.error("Failed to stop embedded Cassandra instance", e);
-                }
-            }
-        }
-    }
-
-    /** */
-    @Test
-    public void primitiveStrategyTest() {
-        Ignition.stopAll(true);
-
-        Map<Long, Long> longMap = TestsHelper.generateLongsMap();
-        Map<String, String> strMap = TestsHelper.generateStringsMap();
-
-        LOGGER.info("Running PRIMITIVE strategy write tests");
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/primitive/ignite-config.xml")) {
-            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
-            IgniteCache<String, String> strCache = ignite.getOrCreateCache(new CacheConfiguration<String, String>("cache2"));
-
-            LOGGER.info("Running single operation write tests");
-            longCache.put(1L, 1L);
-            strCache.put("1", "1");
-            LOGGER.info("Single operation write tests passed");
-
-            LOGGER.info("Running bulk operation write tests");
-            longCache.putAll(longMap);
-            strCache.putAll(strMap);
-            LOGGER.info("Bulk operation write tests passed");
-        }
-
-        LOGGER.info("PRIMITIVE strategy write tests passed");
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/primitive/ignite-config.xml")) {
-            LOGGER.info("Running PRIMITIVE strategy read tests");
-
-            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
-            IgniteCache<String, String> strCache = ignite.getOrCreateCache(new CacheConfiguration<String, String>("cache2"));
-
-            LOGGER.info("Running single operation read tests");
-
-            Long longVal = longCache.get(1L);
-            if (!longVal.equals(longMap.get(1L)))
-                throw new RuntimeException("Long value was incorrectly deserialized from Cassandra");
-
-            String strVal = strCache.get("1");
-            if (!strVal.equals(strMap.get("1")))
-                throw new RuntimeException("String value was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Single operation read tests passed");
-
-            LOGGER.info("Running bulk operation read tests");
-
-            Map<Long, Long> longMap1 = longCache.getAll(longMap.keySet());
-            if (!TestsHelper.checkMapsEqual(longMap, longMap1))
-                throw new RuntimeException("Long values batch was incorrectly deserialized from Cassandra");
-
-            Map<String, String> strMap1 = strCache.getAll(strMap.keySet());
-            if (!TestsHelper.checkMapsEqual(strMap, strMap1))
-                throw new RuntimeException("String values batch was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Bulk operation read tests passed");
-
-            LOGGER.info("PRIMITIVE strategy read tests passed");
-
-            LOGGER.info("Running PRIMITIVE strategy delete tests");
-
-            longCache.remove(1L);
-            longCache.removeAll(longMap.keySet());
-
-            strCache.remove("1");
-            strCache.removeAll(strMap.keySet());
-
-            LOGGER.info("PRIMITIVE strategy delete tests passed");
-        }
-    }
-
-    /** */
-    @Test
-    public void blobStrategyTest() {
-        Ignition.stopAll(true);
-
-        Map<Long, Long> longMap = TestsHelper.generateLongsMap();
-        Map<Long, Person> personMap = TestsHelper.generateLongsPersonsMap();
-
-        LOGGER.info("Running BLOB strategy write tests");
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/blob/ignite-config.xml")) {
-            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
-            IgniteCache<Long, Person> personCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache2"));
-
-            LOGGER.info("Running single operation write tests");
-            longCache.put(1L, 1L);
-            personCache.put(1L, TestsHelper.generateRandomPerson());
-            LOGGER.info("Single operation write tests passed");
-
-            LOGGER.info("Running bulk operation write tests");
-            longCache.putAll(longMap);
-            personCache.putAll(personMap);
-            LOGGER.info("Bulk operation write tests passed");
-        }
-
-        LOGGER.info("BLOB strategy write tests passed");
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/blob/ignite-config.xml")) {
-            LOGGER.info("Running BLOB strategy read tests");
-
-            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
-            IgniteCache<Long, Person> personCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache2"));
-
-            LOGGER.info("Running single operation read tests");
-
-            Long longVal = longCache.get(1L);
-            if (!longVal.equals(longMap.get(1L)))
-                throw new RuntimeException("Long value was incorrectly deserialized from Cassandra");
-
-            Person person = personCache.get(1L);
-            if (!person.equals(personMap.get(1L)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Single operation read tests passed");
-
-            LOGGER.info("Running bulk operation read tests");
-
-            Map<Long, Long> longMap1 = longCache.getAll(longMap.keySet());
-            if (!TestsHelper.checkMapsEqual(longMap, longMap1))
-                throw new RuntimeException("Long values batch was incorrectly deserialized from Cassandra");
-
-            Map<Long, Person> personMap1 = personCache.getAll(personMap.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(personMap, personMap1, false))
-                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Bulk operation read tests passed");
-
-            LOGGER.info("BLOB strategy read tests passed");
-
-            LOGGER.info("Running BLOB strategy delete tests");
-
-            longCache.remove(1L);
-            longCache.removeAll(longMap.keySet());
-
-            personCache.remove(1L);
-            personCache.removeAll(personMap.keySet());
-
-            LOGGER.info("BLOB strategy delete tests passed");
-        }
-    }
-
-    /** */
-    @Test
-    public void pojoStrategyTest() {
-        Ignition.stopAll(true);
-
-        LOGGER.info("Running POJO strategy write tests");
-
-        Map<Long, Person> personMap1 = TestsHelper.generateLongsPersonsMap();
-        Map<PersonId, Person> personMap2 = TestsHelper.generatePersonIdsPersonsMap();
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            IgniteCache<Long, Person> personCache1 = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache1"));
-            IgniteCache<PersonId, Person> personCache2 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache2"));
-            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));
-
-            LOGGER.info("Running single operation write tests");
-            personCache1.put(1L, TestsHelper.generateRandomPerson());
-            personCache2.put(TestsHelper.generateRandomPersonId(), TestsHelper.generateRandomPerson());
-            personCache3.put(TestsHelper.generateRandomPersonId(), TestsHelper.generateRandomPerson());
-            LOGGER.info("Single operation write tests passed");
-
-            LOGGER.info("Running bulk operation write tests");
-            personCache1.putAll(personMap1);
-            personCache2.putAll(personMap2);
-            personCache3.putAll(personMap2);
-            LOGGER.info("Bulk operation write tests passed");
-        }
-
-        LOGGER.info("POJO strategy write tests passed");
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            LOGGER.info("Running POJO strategy read tests");
-
-            IgniteCache<Long, Person> personCache1 = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache1"));
-            IgniteCache<PersonId, Person> personCache2 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache2"));
-            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));
-
-            LOGGER.info("Running single operation read tests");
-            Person person = personCache1.get(1L);
-            if (!person.equalsPrimitiveFields(personMap1.get(1L)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            PersonId id = personMap2.keySet().iterator().next();
-
-            person = personCache2.get(id);
-            if (!person.equalsPrimitiveFields(personMap2.get(id)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            person = personCache3.get(id);
-            if (!person.equals(personMap2.get(id)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Single operation read tests passed");
-
-            LOGGER.info("Running bulk operation read tests");
-
-            Map<Long, Person> persons1 = personCache1.getAll(personMap1.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(persons1, personMap1, true))
-                throw new RuntimeException("Persons values batch was incorrectly deserialized from Cassandra");
-
-            Map<PersonId, Person> persons2 = personCache2.getAll(personMap2.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(persons2, personMap2, true))
-                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
-
-            Map<PersonId, Person> persons3 = personCache3.getAll(personMap2.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(persons3, personMap2, false))
-                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Bulk operation read tests passed");
-
-            LOGGER.info("POJO strategy read tests passed");
-
-            LOGGER.info("Running POJO strategy delete tests");
-
-            personCache1.remove(1L);
-            personCache1.removeAll(personMap1.keySet());
-
-            personCache2.remove(id);
-            personCache2.removeAll(personMap2.keySet());
-
-            personCache3.remove(id);
-            personCache3.removeAll(personMap2.keySet());
-
-            LOGGER.info("POJO strategy delete tests passed");
-        }
-    }
-
-    /** */
-    @Test
-    public void loadCacheTest() {
-        Ignition.stopAll(true);
-
-        LOGGER.info("Running loadCache test");
-
-        LOGGER.info("Filling Cassandra table with test data");
-
-        CacheStore store = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<PersonId, Person>> entries = TestsHelper.generatePersonIdsPersonsEntries();
-
-        store.writeAll(entries);
-
-        LOGGER.info("Cassandra table filled with test data");
-
-        LOGGER.info("Running loadCache test");
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));
-            int size = personCache3.size(CachePeekMode.ALL);
-
-            LOGGER.info("Initial cache size " + size);
-
-            LOGGER.info("Loading cache data from Cassandra table");
-
-            personCache3.loadCache(null, new String[] {"select * from test1.pojo_test3 limit 3"});
-
-            size = personCache3.size(CachePeekMode.ALL);
-            if (size != 3) {
-                throw new RuntimeException("Cache data was incorrectly loaded from Cassandra. " +
-                    "Expected number of records is 3, but loaded number of records is " + size);
-            }
-
-            LOGGER.info("Cache data loaded from Cassandra table");
-        }
-
-        LOGGER.info("loadCache test passed");
-    }
-}
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java b/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java
deleted file mode 100644
index 0bbda7f..0000000
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.ResourceBundle;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.tests.load.Generator;
-import org.apache.ignite.tests.pojos.Person;
-import org.apache.ignite.tests.pojos.PersonId;
-import org.springframework.core.io.ClassPathResource;
-
-/**
- * Helper class for all tests
- */
-public class TestsHelper {
-    /** */
-    private static final String LETTERS_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-
-    /** */
-    private static final String NUMBERS_ALPHABET = "0123456789";
-
-    /** */
-    private static final Random RANDOM = new Random(System.currentTimeMillis());
-
-    /** */
-    private static final ResourceBundle TESTS_SETTINGS = ResourceBundle.getBundle("tests");
-
-    /** */
-    private static final int BULK_OPERATION_SIZE = parseTestSettings("bulk.operation.size");
-
-    /** */
-    private static final String LOAD_TESTS_CACHE_NAME = TESTS_SETTINGS.getString("load.tests.cache.name");
-
-    /** */
-    private static final int LOAD_TESTS_THREADS_COUNT = parseTestSettings("load.tests.threads.count");
-
-    /** */
-    private static final int LOAD_TESTS_WARMUP_PERIOD = parseTestSettings("load.tests.warmup.period");
-
-    /** */
-    private static final int LOAD_TESTS_EXECUTION_TIME = parseTestSettings("load.tests.execution.time");
-
-    /** */
-    private static final int LOAD_TESTS_REQUESTS_LATENCY = parseTestSettings("load.tests.requests.latency");
-
-    /** */
-    private static final String LOAD_TESTS_PERSISTENCE_SETTINGS = TESTS_SETTINGS.getString("load.tests.persistence.settings");
-
-    /** */
-    private static final String LOAD_TESTS_IGNITE_CONFIG = TESTS_SETTINGS.getString("load.tests.ignite.config");
-
-    /** */
-    private static final Generator LOAD_TESTS_KEY_GENERATOR;
-
-    /** */
-    private static final Generator LOAD_TESTS_VALUE_GENERATOR;
-
-    /** */
-    private static int parseTestSettings(String name) {
-        return Integer.parseInt(TESTS_SETTINGS.getString(name));
-    }
-
-    static {
-        try {
-            LOAD_TESTS_KEY_GENERATOR = (Generator)Class.forName(TESTS_SETTINGS.getString("load.tests.key.generator")).newInstance();
-            LOAD_TESTS_VALUE_GENERATOR = (Generator)Class.forName(TESTS_SETTINGS.getString("load.tests.value.generator")).newInstance();
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to initialize TestsHelper", e);
-        }
-    }
-
-    /** */
-    public static int getLoadTestsThreadsCount() {
-        return LOAD_TESTS_THREADS_COUNT;
-    }
-
-    /** */
-    public static int getLoadTestsWarmupPeriod() {
-        return LOAD_TESTS_WARMUP_PERIOD;
-    }
-
-    /** */
-    public static int getLoadTestsExecutionTime() {
-        return LOAD_TESTS_EXECUTION_TIME;
-    }
-
-    /** */
-    public static int getLoadTestsRequestsLatency() {
-        return LOAD_TESTS_REQUESTS_LATENCY;
-    }
-
-    /** */
-    public static ClassPathResource getLoadTestsPersistenceSettings() {
-        return new ClassPathResource(LOAD_TESTS_PERSISTENCE_SETTINGS);
-    }
-
-    /** */
-    public static String getLoadTestsIgniteConfig() {
-        return LOAD_TESTS_IGNITE_CONFIG;
-    }
-
-    /** */
-    public static int getBulkOperationSize() {
-        return BULK_OPERATION_SIZE;
-    }
-
-    /** */
-    public static String getLoadTestsCacheName() {
-        return LOAD_TESTS_CACHE_NAME;
-    }
-
-    /** */
-    public static Object generateLoadTestsKey(long i) {
-        return LOAD_TESTS_KEY_GENERATOR.generate(i);
-    }
-
-    /** */
-    public static Object generateLoadTestsValue(long i) {
-        return LOAD_TESTS_VALUE_GENERATOR.generate(i);
-    }
-
-    /** */
-    @SuppressWarnings("unchecked")
-    public static CacheEntryImpl generateLoadTestsEntry(long i) {
-        return new CacheEntryImpl(TestsHelper.generateLoadTestsKey(i), TestsHelper.generateLoadTestsValue(i));
-    }
-
-    /** */
-    public static <K, V> Collection<K> getKeys(Collection<CacheEntryImpl<K, V>> entries) {
-        List<K> list = new LinkedList<>();
-
-        for (CacheEntryImpl<K, ?> entry : entries)
-            list.add(entry.getKey());
-
-        return list;
-    }
-
-    /** */
-    public static Map<Long, Long> generateLongsMap() {
-        return generateLongsMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<Long, Long> generateLongsMap(int cnt) {
-        Map<Long, Long> map = new HashMap<>();
-
-        for (long i = 0; i < cnt; i++)
-            map.put(i, i + 123);
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<Long, Long>> generateLongsEntries() {
-        return generateLongsEntries(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<Long, Long>> generateLongsEntries(int cnt) {
-        Collection<CacheEntryImpl<Long, Long>> entries = new LinkedList<>();
-
-        for (long i = 0; i < cnt; i++)
-            entries.add(new CacheEntryImpl<>(i, i + 123));
-
-        return entries;
-    }
-
-    /** */
-    public static Map<String, String> generateStringsMap() {
-        return generateStringsMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<String, String> generateStringsMap(int cnt) {
-        Map<String, String> map = new HashMap<>();
-
-        for (int i = 0; i < cnt; i++)
-            map.put(Integer.toString(i), randomString(5));
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<String, String>> generateStringsEntries() {
-        return generateStringsEntries(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<String, String>> generateStringsEntries(int cnt) {
-        Collection<CacheEntryImpl<String, String>> entries = new LinkedList<>();
-
-        for (int i = 0; i < cnt; i++)
-            entries.add(new CacheEntryImpl<>(Integer.toString(i), randomString(5)));
-
-        return entries;
-    }
-
-    /** */
-    public static Map<Long, Person> generateLongsPersonsMap() {
-        Map<Long, Person> map = new HashMap<>();
-
-        for (long i = 0; i < BULK_OPERATION_SIZE; i++)
-            map.put(i, generateRandomPerson());
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<Long, Person>> generateLongsPersonsEntries() {
-        Collection<CacheEntryImpl<Long, Person>> entries = new LinkedList<>();
-
-        for (long i = 0; i < BULK_OPERATION_SIZE; i++)
-            entries.add(new CacheEntryImpl<>(i, generateRandomPerson()));
-
-        return entries;
-    }
-
-    /** */
-    public static Map<PersonId, Person> generatePersonIdsPersonsMap() {
-        return generatePersonIdsPersonsMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<PersonId, Person> generatePersonIdsPersonsMap(int cnt) {
-        Map<PersonId, Person> map = new HashMap<>();
-
-        for (int i = 0; i < cnt; i++)
-            map.put(generateRandomPersonId(), generateRandomPerson());
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<PersonId, Person>> generatePersonIdsPersonsEntries() {
-        return generatePersonIdsPersonsEntries(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<PersonId, Person>> generatePersonIdsPersonsEntries(int cnt) {
-        Collection<CacheEntryImpl<PersonId, Person>> entries = new LinkedList<>();
-
-        for (int i = 0; i < cnt; i++)
-            entries.add(new CacheEntryImpl<>(generateRandomPersonId(), generateRandomPerson()));
-
-        return entries;
-    }
-
-    /** */
-    public static Person generateRandomPerson() {
-        int phonesCnt = RANDOM.nextInt(4);
-
-        List<String> phones = new LinkedList<>();
-
-        for (int i = 0; i < phonesCnt; i++)
-            phones.add(randomNumber(4));
-
-        return new Person(randomString(4), randomString(4), RANDOM.nextInt(100),
-            RANDOM.nextBoolean(), RANDOM.nextLong(), RANDOM.nextFloat(), new Date(), phones);
-    }
-
-    /** */
-    public static PersonId generateRandomPersonId() {
-        return new PersonId(randomString(4), randomString(4), RANDOM.nextInt(100));
-    }
-
-    /** */
-    public static boolean checkMapsEqual(Map map1, Map map2) {
-        if (map1 == null || map2 == null || map1.size() != map2.size())
-            return false;
-
-        for (Object key : map1.keySet()) {
-            Object obj1 = map1.get(key);
-            Object obj2 = map2.get(key);
-
-            if (obj1 == null || obj2 == null || !obj1.equals(obj2))
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K, V> boolean checkCollectionsEqual(Map<K, V> map, Collection<CacheEntryImpl<K, V>> col) {
-        if (map == null || col == null || map.size() != col.size())
-            return false;
-
-        for (CacheEntryImpl<K, V> entry : col) {
-            if (!entry.getValue().equals(map.get(entry.getKey())))
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkPersonMapsEqual(Map<K, Person> map1, Map<K, Person> map2,
-        boolean primitiveFieldsOnly) {
-        if (map1 == null || map2 == null || map1.size() != map2.size())
-            return false;
-
-        for (K key : map1.keySet()) {
-            Person person1 = map1.get(key);
-            Person person2 = map2.get(key);
-
-            boolean equals = person1 != null && person2 != null &&
-                (primitiveFieldsOnly ? person1.equalsPrimitiveFields(person2) : person1.equals(person2));
-
-            if (!equals)
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkPersonCollectionsEqual(Map<K, Person> map, Collection<CacheEntryImpl<K, Person>> col,
-        boolean primitiveFieldsOnly) {
-        if (map == null || col == null || map.size() != col.size())
-            return false;
-
-        for (CacheEntryImpl<K, Person> entry : col) {
-            boolean equals = primitiveFieldsOnly ?
-                entry.getValue().equalsPrimitiveFields(map.get(entry.getKey())) :
-                entry.getValue().equals(map.get(entry.getKey()));
-
-            if (!equals)
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static String randomString(int len) {
-        StringBuilder builder = new StringBuilder(len);
-
-        for (int i = 0; i < len; i++)
-            builder.append(LETTERS_ALPHABET.charAt(RANDOM.nextInt(LETTERS_ALPHABET.length())));
-
-        return builder.toString();
-    }
-
-    /** */
-    public static String randomNumber(int len) {
-        StringBuilder builder = new StringBuilder(len);
-
-        for (int i = 0; i < len; i++)
-            builder.append(NUMBERS_ALPHABET.charAt(RANDOM.nextInt(NUMBERS_ALPHABET.length())));
-
-        return builder.toString();
-    }
-}
diff --git a/modules/cassandra/store/README.txt b/modules/cassandra/store/README.txt
new file mode 100644
index 0000000..fd72dea
--- /dev/null
+++ b/modules/cassandra/store/README.txt
@@ -0,0 +1,32 @@
+Apache Ignite Cassandra Store Module
+------------------------
+
+Apache Ignite Cassandra Store module provides CacheStore implementation backed by Cassandra database.
+
+To enable Cassandra Store module when starting a standalone node, move 'optional/ignite-cassandra-store' folder to
+'libs' folder before running 'ignite.{sh|bat}' script. The content of the module folder will
+be added to classpath in this case.
+
+Importing Cassandra Store Module In Maven Project
+-------------------------------------
+
+If you are using Maven to manage dependencies of your project, you can add Cassandra Store module
+dependency like this (replace '${ignite.version}' with actual Ignite version you are
+interested in):
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                        http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    ...
+    <dependencies>
+        ...
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-cassandra-store</artifactId>
+            <version>${ignite.version}</version>
+        </dependency>
+        ...
+    </dependencies>
+    ...
+</project>
diff --git a/modules/cassandra/licenses/apache-2.0.txt b/modules/cassandra/store/licenses/apache-2.0.txt
similarity index 100%
rename from modules/cassandra/licenses/apache-2.0.txt
rename to modules/cassandra/store/licenses/apache-2.0.txt
diff --git a/modules/cassandra/store/pom.xml b/modules/cassandra/store/pom.xml
new file mode 100644
index 0000000..0b233fa
--- /dev/null
+++ b/modules/cassandra/store/pom.xml
@@ -0,0 +1,305 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+    POM file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.ignite</groupId>
+        <artifactId>ignite-cassandra</artifactId>
+        <version>1.8.0-SNAPSHOT</version>
+        <relativePath>..</relativePath>
+    </parent>
+
+    <artifactId>ignite-cassandra-store</artifactId>
+    <version>1.8.0-SNAPSHOT</version>
+    <url>http://ignite.apache.org</url>
+
+    <properties>
+        <commons-beanutils.version>1.8.3</commons-beanutils.version>
+        <cassandra-driver.version>3.0.0</cassandra-driver.version>
+        <cassandra-all.version>3.3</cassandra-all.version>
+        <netty.version>4.0.33.Final</netty.version>
+        <guava.version>19.0</guava.version>
+        <metrics-core.version>3.0.2</metrics-core.version>
+    </properties>
+
+    <dependencies>
+        <!-- Apache commons -->
+        <dependency>
+            <groupId>commons-beanutils</groupId>
+            <artifactId>commons-beanutils</artifactId>
+            <version>${commons-beanutils.version}</version>
+        </dependency>
+
+        <!-- Ignite -->
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-core</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-spring</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-log4j</artifactId>
+            <version>${project.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- Cassandra and required dependencies -->
+        <dependency>
+            <groupId>com.datastax.cassandra</groupId>
+            <artifactId>cassandra-driver-core</artifactId>
+            <version>${cassandra-driver.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-handler</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-buffer</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-common</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-transport</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-codec</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>${guava.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.codahale.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+            <version>${metrics-core.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.cassandra</groupId>
+            <artifactId>cassandra-all</artifactId>
+            <version>${cassandra-all.version}</version>
+            <scope>test</scope>
+            <exclusions>
+                <exclusion>
+                    <artifactId>log4j-over-slf4j</artifactId>
+                    <groupId>org.slf4j</groupId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <!-- Apache log4j -->
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.2</version>
+                <configuration>
+                    <source>1.7</source>
+                    <target>1.7</target>
+                    <compilerVersion>1.7</compilerVersion>
+                    <encoding>UTF-8</encoding>
+                    <fork>true</fork>
+                    <debug>false</debug>
+                    <debuglevel>lines,vars,source</debuglevel>
+                    <meminitial>256</meminitial>
+                    <maxmem>512</maxmem>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <version>2.10</version>
+                <executions>
+                    <execution>
+                        <id>copy-all-dependencies</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${project.build.directory}/tests-package/lib</outputDirectory>
+                            <overWriteReleases>false</overWriteReleases>
+                            <overWriteSnapshots>false</overWriteSnapshots>
+                            <overWriteIfNewer>true</overWriteIfNewer>
+                            <excludeArtifactIds>
+                                netty-all,cassandra-all,snappy-java,lz4,compress-lzf,commons-codec,commons-lang3,commons-math3,
+                                concurrentlinkedhashmap-lru,antlr,ST4,antlr-runtime,jcl-over-slf4j,jackson-core-asl,
+                                jackson-mapper-asl,json-simple,high-scale-lib,snakeyaml,jbcrypt,reporter-config3,
+                                reporter-config-base,hibernate-validator,validation-api,jboss-logging,thrift-server,
+                                disruptor,stream,fastutil,logback-core,logback-classic,libthrift,httpclient,httpcore,
+                                cassandra-thrift,jna,jamm,joda-time,sigar,ecj,tools
+                            </excludeArtifactIds>
+                        </configuration>
+                    </execution>
+<!-- -->
+                    <execution>
+                        <id>copy-main-dependencies</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${project.build.directory}/libs</outputDirectory>
+                            <overWriteReleases>false</overWriteReleases>
+                            <overWriteSnapshots>false</overWriteSnapshots>
+                            <overWriteIfNewer>true</overWriteIfNewer>
+                            <excludeTransitive>true</excludeTransitive>
+                            <excludeGroupIds>
+                                org.apache.ignite,org.springframework,org.gridgain
+                            </excludeGroupIds>
+                            <excludeArtifactIds>
+                                commons-logging,slf4j-api,cache-api,slf4j-api,aopalliance
+                            </excludeArtifactIds>
+                            <includeScope>runtime</includeScope>
+                        </configuration>
+                    </execution>
+<!-- -->
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-antrun-plugin</artifactId>
+                <version>1.8</version>
+                <dependencies>
+                    <dependency>
+                        <groupId>ant-contrib</groupId>
+                        <artifactId>ant-contrib</artifactId>
+                        <version>1.0b3</version>
+                        <exclusions>
+                            <exclusion>
+                                <groupId>ant</groupId>
+                                <artifactId>ant</artifactId>
+                            </exclusion>
+                        </exclusions>
+                    </dependency>
+                </dependencies>
+                <executions>
+                    <execution>
+                        <id>package-tests</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>run</goal>
+                        </goals>
+                        <configuration>
+                            <target>
+                                <taskdef resource="net/sf/antcontrib/antlib.xml" />
+                                <if>
+                                    <available file="${project.build.directory}/test-classes" type="dir" />
+                                    <then>
+                                        <copy todir="${project.build.directory}/tests-package/lib">
+                                            <fileset dir="${project.build.directory}">
+                                                <include name="*.jar" />
+                                            </fileset>
+                                        </copy>
+
+                                        <jar destfile="${project.build.directory}/tests-package/lib/${project.artifactId}-${project.version}-tests.jar">
+                                            <fileset dir="${project.build.directory}/test-classes">
+                                                <include name="**/*.class" />
+                                            </fileset>
+                                        </jar>
+
+                                        <copy todir="${project.build.directory}/tests-package/settings">
+                                            <fileset dir="${project.build.directory}/test-classes">
+                                                <include name="**/*.properties" />
+                                                <include name="**/*.xml" />
+                                            </fileset>
+                                        </copy>
+
+                                        <copy todir="${project.build.directory}/tests-package">
+                                            <fileset dir="${project.build.testSourceDirectory}/../scripts">
+                                                <include name="**/*" />
+                                            </fileset>
+                                        </copy>
+
+                                        <fixcrlf srcdir="${project.build.directory}/tests-package" eol="lf" eof="remove">
+                                            <include name="*.sh" />
+                                        </fixcrlf>
+
+                                        <copy todir="${project.build.directory}/tests-package">
+                                            <fileset dir="${project.build.testSourceDirectory}/..">
+                                                <include name="bootstrap/**" />
+                                            </fileset>
+                                        </copy>
+
+                                        <fixcrlf srcdir="${project.build.directory}/tests-package/bootstrap" eol="lf" eof="remove">
+                                            <include name="**" />
+                                        </fixcrlf>
+
+                                        <zip destfile="${project.build.directory}/ignite-cassandra-tests-${project.version}.zip" compress="true" whenempty="create" level="9" encoding="UTF-8" useLanguageEncodingFlag="true" createUnicodeExtraFields="not-encodeable">
+
+                                            <zipfileset dir="${project.build.directory}/tests-package" prefix="ignite-cassandra-tests">
+                                                <exclude name="**/*.sh" />
+                                            </zipfileset>
+
+                                            <zipfileset dir="${project.build.directory}/tests-package" prefix="ignite-cassandra-tests" filemode="555">
+                                                <include name="**/*.sh" />
+                                            </zipfileset>
+                                        </zip>
+                                    </then>
+                                </if>
+                            </target>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+        </plugins>
+    </build>
+</project>
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java
similarity index 71%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java
index f7e7917..9058837 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java
@@ -22,8 +22,10 @@
 import com.datastax.driver.core.Row;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
+import java.util.HashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -41,6 +43,9 @@
 import org.apache.ignite.cache.store.cassandra.session.ExecutionAssistant;
 import org.apache.ignite.cache.store.cassandra.session.GenericBatchExecutionAssistant;
 import org.apache.ignite.cache.store.cassandra.session.LoadCacheCustomQueryWorker;
+import org.apache.ignite.cache.store.cassandra.session.transaction.DeleteMutation;
+import org.apache.ignite.cache.store.cassandra.session.transaction.Mutation;
+import org.apache.ignite.cache.store.cassandra.session.transaction.WriteMutation;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiInClosure;
 import org.apache.ignite.logger.NullLogger;
@@ -54,14 +59,16 @@
  * @param <V> Ignite cache value type.
  */
 public class CassandraCacheStore<K, V> implements CacheStore<K, V> {
-    /** Connection attribute property name. */
-    private static final String ATTR_CONN_PROP = "CASSANDRA_STORE_CONNECTION";
+    /** Buffer to store mutations performed within a transaction. */
+    private static final String TRANSACTION_BUFFER = "CASSANDRA_TRANSACTION_BUFFER";
 
     /** Auto-injected store session. */
+    @SuppressWarnings("unused")
     @CacheStoreSessionResource
     private CacheStoreSession storeSes;
 
     /** Auto-injected logger instance. */
+    @SuppressWarnings("unused")
     @LoggerResource
     private IgniteLogger log;
 
@@ -72,7 +79,7 @@
     private int maxPoolSize = Runtime.getRuntime().availableProcessors();
 
     /** Controller component responsible for serialization logic. */
-    private PersistenceController controller;
+    private final PersistenceController controller;
 
     /**
      * Store constructor.
@@ -89,9 +96,12 @@
 
     /** {@inheritDoc} */
     @Override public void loadCache(IgniteBiInClosure<K, V> clo, Object... args) throws CacheLoaderException {
-        if (clo == null || args == null || args.length == 0)
+        if (clo == null)
             return;
 
+        if (args == null || args.length == 0)
+            args = new String[] {"select * from " + controller.getPersistenceSettings().getKeyspace() + "." + cassandraTable() + ";"};
+
         ExecutorService pool = null;
 
         Collection<Future<?>> futs = new ArrayList<>(args.length);
@@ -127,12 +137,22 @@
 
     /** {@inheritDoc} */
     @Override public void sessionEnd(boolean commit) throws CacheWriterException {
-        if (storeSes == null || storeSes.transaction() == null)
+        if (!storeSes.isWithinTransaction())
             return;
 
-        CassandraSession cassandraSes = (CassandraSession) storeSes.properties().remove(ATTR_CONN_PROP);
+        List<Mutation> mutations = mutations();
+        if (mutations == null || mutations.isEmpty())
+            return;
 
-        U.closeQuiet(cassandraSes);
+        CassandraSession ses = getCassandraSession();
+
+        try {
+            ses.execute(mutations);
+        }
+        finally {
+            mutations.clear();
+            U.closeQuiet(ses);
+        }
     }
 
     /** {@inheritDoc} */
@@ -145,33 +165,44 @@
 
         try {
             return ses.execute(new ExecutionAssistant<V>() {
+                /** {@inheritDoc} */
                 @Override public boolean tableExistenceRequired() {
                     return false;
                 }
 
-                @Override public String getStatement() {
-                    return controller.getLoadStatement(false);
+                /** {@inheritDoc} */
+                @Override public String getTable() {
+                    return cassandraTable();
                 }
 
+                /** {@inheritDoc} */
+                @Override public String getStatement() {
+                    return controller.getLoadStatement(cassandraTable(), false);
+                }
+
+                /** {@inheritDoc} */
                 @Override public BoundStatement bindStatement(PreparedStatement statement) {
                     return controller.bindKey(statement, key);
                 }
 
+                /** {@inheritDoc} */
                 @Override public KeyValuePersistenceSettings getPersistenceSettings() {
                     return controller.getPersistenceSettings();
                 }
 
+                /** {@inheritDoc} */
                 @Override public String operationName() {
                     return "READ";
                 }
 
+                /** {@inheritDoc} */
                 @Override public V process(Row row) {
                     return row == null ? null : (V)controller.buildValueObject(row);
                 }
             });
         }
         finally {
-            closeCassandraSession(ses);
+            U.closeQuiet(ses);
         }
     }
 
@@ -188,8 +219,13 @@
                 private Map<K, V> data = new HashMap<>();
 
                 /** {@inheritDoc} */
+                @Override public String getTable() {
+                    return cassandraTable();
+                }
+
+                /** {@inheritDoc} */
                 @Override public String getStatement() {
-                    return controller.getLoadStatement(true);
+                    return controller.getLoadStatement(cassandraTable(), true);
                 }
 
                 /** {@inheritDoc} */
@@ -219,7 +255,7 @@
             }, keys);
         }
         finally {
-            closeCassandraSession(ses);
+            U.closeQuiet(ses);
         }
     }
 
@@ -228,37 +264,53 @@
         if (entry == null || entry.getKey() == null)
             return;
 
+        if (storeSes.isWithinTransaction()) {
+            accumulate(new WriteMutation(entry, cassandraTable(), controller));
+            return;
+        }
+
         CassandraSession ses = getCassandraSession();
 
         try {
             ses.execute(new ExecutionAssistant<Void>() {
+                /** {@inheritDoc} */
                 @Override public boolean tableExistenceRequired() {
                     return true;
                 }
 
-                @Override public String getStatement() {
-                    return controller.getWriteStatement();
+                /** {@inheritDoc} */
+                @Override public String getTable() {
+                    return cassandraTable();
                 }
 
+                /** {@inheritDoc} */
+                @Override public String getStatement() {
+                    return controller.getWriteStatement(cassandraTable());
+                }
+
+                /** {@inheritDoc} */
                 @Override public BoundStatement bindStatement(PreparedStatement statement) {
                     return controller.bindKeyValue(statement, entry.getKey(), entry.getValue());
                 }
 
+                /** {@inheritDoc} */
                 @Override public KeyValuePersistenceSettings getPersistenceSettings() {
                     return controller.getPersistenceSettings();
                 }
 
+                /** {@inheritDoc} */
                 @Override public String operationName() {
                     return "WRITE";
                 }
 
+                /** {@inheritDoc} */
                 @Override public Void process(Row row) {
                     return null;
                 }
             });
         }
         finally {
-            closeCassandraSession(ses);
+            U.closeQuiet(ses);
         }
     }
 
@@ -267,13 +319,25 @@
         if (entries == null || entries.isEmpty())
             return;
 
+        if (storeSes.isWithinTransaction()) {
+            for (Cache.Entry<?, ?> entry : entries)
+                accumulate(new WriteMutation(entry, cassandraTable(), controller));
+
+            return;
+        }
+
         CassandraSession ses = getCassandraSession();
 
         try {
             ses.execute(new GenericBatchExecutionAssistant<Void, Cache.Entry<? extends K, ? extends V>>() {
                 /** {@inheritDoc} */
+                @Override public String getTable() {
+                    return cassandraTable();
+                }
+
+                /** {@inheritDoc} */
                 @Override public String getStatement() {
-                    return controller.getWriteStatement();
+                    return controller.getWriteStatement(cassandraTable());
                 }
 
                 /** {@inheritDoc} */
@@ -299,7 +363,7 @@
             }, entries);
         }
         finally {
-            closeCassandraSession(ses);
+            U.closeQuiet(ses);
         }
     }
 
@@ -308,38 +372,54 @@
         if (key == null)
             return;
 
+        if (storeSes.isWithinTransaction()) {
+            accumulate(new DeleteMutation(key, cassandraTable(), controller));
+            return;
+        }
+
         CassandraSession ses = getCassandraSession();
 
         try {
             ses.execute(new ExecutionAssistant<Void>() {
+                /** {@inheritDoc} */
                 @Override public boolean tableExistenceRequired() {
                     return false;
                 }
 
-                @Override public String getStatement() {
-                    return controller.getDeleteStatement();
+                /** {@inheritDoc} */
+                @Override public String getTable() {
+                    return cassandraTable();
                 }
 
+                /** {@inheritDoc} */
+                @Override public String getStatement() {
+                    return controller.getDeleteStatement(cassandraTable());
+                }
+
+                /** {@inheritDoc} */
                 @Override public BoundStatement bindStatement(PreparedStatement statement) {
                     return controller.bindKey(statement, key);
                 }
 
 
+                /** {@inheritDoc} */
                 @Override public KeyValuePersistenceSettings getPersistenceSettings() {
                     return controller.getPersistenceSettings();
                 }
 
+                /** {@inheritDoc} */
                 @Override public String operationName() {
                     return "DELETE";
                 }
 
+                /** {@inheritDoc} */
                 @Override public Void process(Row row) {
                     return null;
                 }
             });
         }
         finally {
-            closeCassandraSession(ses);
+            U.closeQuiet(ses);
         }
     }
 
@@ -348,13 +428,25 @@
         if (keys == null || keys.isEmpty())
             return;
 
+        if (storeSes.isWithinTransaction()) {
+            for (Object key : keys)
+                accumulate(new DeleteMutation(key, cassandraTable(), controller));
+
+            return;
+        }
+
         CassandraSession ses = getCassandraSession();
 
         try {
             ses.execute(new GenericBatchExecutionAssistant<Void, Object>() {
                 /** {@inheritDoc} */
+                @Override public String getTable() {
+                    return cassandraTable();
+                }
+
+                /** {@inheritDoc} */
                 @Override public String getStatement() {
-                    return controller.getDeleteStatement();
+                    return controller.getDeleteStatement(cassandraTable());
                 }
 
                 /** {@inheritDoc} */
@@ -367,13 +459,14 @@
                     return controller.getPersistenceSettings();
                 }
 
+                /** {@inheritDoc} */
                 @Override public String operationName() {
                     return "BULK_DELETE";
                 }
             }, keys);
         }
         finally {
-            closeCassandraSession(ses);
+            U.closeQuiet(ses);
         }
     }
 
@@ -384,26 +477,43 @@
      * @return Cassandra session wrapper.
      */
     private CassandraSession getCassandraSession() {
-        if (storeSes == null || storeSes.transaction() == null)
-            return dataSrc.session(log != null ? log : new NullLogger());
-
-        CassandraSession ses = (CassandraSession) storeSes.properties().get(ATTR_CONN_PROP);
-
-        if (ses == null) {
-            ses = dataSrc.session(log != null ? log : new NullLogger());
-            storeSes.properties().put(ATTR_CONN_PROP, ses);
-        }
-
-        return ses;
+        return dataSrc.session(log != null ? log : new NullLogger());
     }
 
     /**
-     * Releases Cassandra related resources.
+     * Returns table name to use for all Cassandra based operations (READ/WRITE/DELETE).
      *
-     * @param ses Cassandra session wrapper.
+     * @return Table name.
      */
-    private void closeCassandraSession(CassandraSession ses) {
-        if (ses != null && (storeSes == null || storeSes.transaction() == null))
-            U.closeQuiet(ses);
+    private String cassandraTable() {
+        return controller.getPersistenceSettings().getTable() != null ?
+            controller.getPersistenceSettings().getTable() : storeSes.cacheName().trim().toLowerCase();
+    }
+
+    /**
+     * Accumulates mutation in the transaction buffer.
+     *
+     * @param mutation Mutation operation.
+     */
+    private void accumulate(Mutation mutation) {
+        //noinspection unchecked
+        List<Mutation> mutations = (List<Mutation>)storeSes.properties().get(TRANSACTION_BUFFER);
+
+        if (mutations == null) {
+            mutations = new LinkedList<>();
+            storeSes.properties().put(TRANSACTION_BUFFER, mutations);
+        }
+
+        mutations.add(mutation);
+    }
+
+    /**
+     * Returns all the mutations performed within a transaction.
+     *
+     * @return Mutations.
+     */
+    private List<Mutation> mutations() {
+        //noinspection unchecked
+        return (List<Mutation>)storeSes.properties().get(TRANSACTION_BUFFER);
     }
 }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java
similarity index 99%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java
index 7584dfb..71bb737 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java
@@ -46,7 +46,7 @@
     private String persistenceSettingsBean;
 
     /** Data source. */
-    private transient DataSource dataSrc;
+    private DataSource dataSrc;
 
     /** Persistence settings. */
     private KeyValuePersistenceSettings persistenceSettings;
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java
similarity index 68%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java
index d3bff7f..badd5df 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java
@@ -18,10 +18,15 @@
 package org.apache.ignite.cache.store.cassandra.common;
 
 import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.DataType;
 import com.datastax.driver.core.Session;
+import com.datastax.driver.core.exceptions.DriverException;
 import com.datastax.driver.core.exceptions.InvalidQueryException;
 import com.datastax.driver.core.exceptions.NoHostAvailableException;
 import com.datastax.driver.core.exceptions.ReadTimeoutException;
+
+import java.net.InetSocketAddress;
+import java.util.Map;
 import java.util.regex.Pattern;
 import org.apache.ignite.internal.util.typedef.internal.U;
 
@@ -35,8 +40,15 @@
     /** Cassandra error message if trying to create table inside nonexistent keyspace. */
     private static final Pattern KEYSPACE_EXIST_ERROR2 = Pattern.compile("Cannot add table '[0-9a-zA-Z_]+' to non existing keyspace.*");
 
+    /** Cassandra error message if trying to create table inside nonexistent keyspace. */
+    private static final Pattern KEYSPACE_EXIST_ERROR3 = Pattern.compile("Error preparing query, got ERROR INVALID: " +
+            "Keyspace [0-9a-zA-Z_]+ does not exist");
+
     /** Cassandra error message if specified table doesn't exist. */
-    private static final Pattern TABLE_EXIST_ERROR = Pattern.compile("unconfigured table [0-9a-zA-Z_]+");
+    private static final Pattern TABLE_EXIST_ERROR1 = Pattern.compile("unconfigured table [0-9a-zA-Z_]+");
+
+    /** Cassandra error message if specified table doesn't exist. */
+    private static final String TABLE_EXIST_ERROR2 = "Error preparing query, got ERROR INVALID: unconfigured table";
 
     /** Cassandra error message if trying to use prepared statement created from another session. */
     private static final String PREP_STATEMENT_CLUSTER_INSTANCE_ERROR = "You may have used a PreparedStatement that " +
@@ -84,11 +96,25 @@
     public static boolean isTableAbsenceError(Throwable e) {
         while (e != null) {
             if (e instanceof InvalidQueryException &&
-                (TABLE_EXIST_ERROR.matcher(e.getMessage()).matches() ||
+                (TABLE_EXIST_ERROR1.matcher(e.getMessage()).matches() ||
                     KEYSPACE_EXIST_ERROR1.matcher(e.getMessage()).matches() ||
                     KEYSPACE_EXIST_ERROR2.matcher(e.getMessage()).matches()))
                 return true;
 
+            if (e instanceof NoHostAvailableException && ((NoHostAvailableException) e).getErrors() != null) {
+                NoHostAvailableException ex = (NoHostAvailableException)e;
+
+                for (Map.Entry<InetSocketAddress, Throwable> entry : ex.getErrors().entrySet()) {
+                    //noinspection ThrowableResultOfMethodCallIgnored
+                    Throwable error = entry.getValue();
+
+                    if (error instanceof DriverException &&
+                        (error.getMessage().contains(TABLE_EXIST_ERROR2) ||
+                             KEYSPACE_EXIST_ERROR3.matcher(error.getMessage()).matches()))
+                        return true;
+                }
+            }
+
             e = e.getCause();
         }
 
@@ -129,5 +155,22 @@
 
         return false;
     }
+
+    /**
+     * Checks if two Java classes are Cassandra compatible - mapped to the same Cassandra type.
+     *
+     * @param type1 First type.
+     * @param type2 Second type.
+     * @return {@code true} if classes are compatible and {@code false} if not.
+     */
+    public static boolean isCassandraCompatibleTypes(Class type1, Class type2) {
+        if (type1 == null || type2 == null)
+            return false;
+
+        DataType.Name t1 = PropertyMappingHelper.getCassandraType(type1);
+        DataType.Name t2 = PropertyMappingHelper.getCassandraType(type2);
+
+        return t1 != null && t2 != null && t1.equals(t2);
+    }
 }
 
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java
similarity index 97%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java
index 9053a93..64b784b 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java
@@ -130,8 +130,7 @@
             return list;
 
         for (PropertyDescriptor descriptor : descriptors) {
-            if (descriptor.getReadMethod() == null || descriptor.getWriteMethod() == null ||
-                (primitive && !isPrimitivePropertyDescriptor(descriptor)))
+            if (descriptor.getReadMethod() == null || (primitive && !isPrimitivePropertyDescriptor(descriptor)))
                 continue;
 
             if (annotation == null || descriptor.getReadMethod().getAnnotation(annotation) != null)
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java
similarity index 98%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java
index 6745a16..f2e57a9 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java
@@ -43,7 +43,7 @@
     private Random random = new Random(System.currentTimeMillis());
 
     /** */
-    private int summary = 0;
+    private int summary;
 
     /**
      * Creates sleeper instance.
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/SystemHelper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/SystemHelper.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/SystemHelper.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/SystemHelper.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/package-info.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/common/package-info.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/package-info.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
similarity index 93%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
index e1fd60c..a2358a6 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
@@ -17,10 +17,12 @@
 
 package org.apache.ignite.cache.store.cassandra.datasource;
 
+import java.io.Serializable;
+
 /**
  * Provides credentials for Cassandra (instead of specifying user/password directly in Spring context XML).
  */
-public interface Credentials {
+public interface Credentials extends Serializable {
     /**
      * Returns user name
      *
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java
similarity index 77%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java
index 1ecb28f..f582aac 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java
@@ -31,19 +31,37 @@
 import com.datastax.driver.core.policies.ReconnectionPolicy;
 import com.datastax.driver.core.policies.RetryPolicy;
 import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.Serializable;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.UUID;
+
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.cache.store.cassandra.session.CassandraSession;
 import org.apache.ignite.cache.store.cassandra.session.CassandraSessionImpl;
+import org.apache.ignite.internal.util.typedef.internal.U;
 
 /**
  * Data source abstraction to specify configuration of the Cassandra session to be used.
  */
-public class DataSource {
+public class DataSource implements Externalizable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * Null object, used as a replacement for those Cassandra connection options which
+     * don't support serialization (RetryPolicy, LoadBalancingPolicy, etc.).
+     */
+    private static final UUID NULL_OBJECT = UUID.fromString("45ffae47-3193-5910-84a2-048fe65735d9");
+
     /** Number of rows to immediately fetch in CQL statement execution. */
     private Integer fetchSize;
 
@@ -324,7 +342,7 @@
      * @param plc Load balancing policy.
      */
     public void setLoadBalancingPolicy(LoadBalancingPolicy plc) {
-        this.loadBalancingPlc = plc;
+        loadBalancingPlc = plc;
 
         invalidate();
     }
@@ -336,7 +354,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setReconnectionPolicy(ReconnectionPolicy plc) {
-        this.reconnectionPlc = plc;
+        reconnectionPlc = plc;
 
         invalidate();
     }
@@ -348,7 +366,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setRetryPolicy(RetryPolicy plc) {
-        this.retryPlc = plc;
+        retryPlc = plc;
 
         invalidate();
     }
@@ -360,7 +378,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setAddressTranslator(AddressTranslator translator) {
-        this.addrTranslator = translator;
+        addrTranslator = translator;
 
         invalidate();
     }
@@ -372,7 +390,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setSpeculativeExecutionPolicy(SpeculativeExecutionPolicy plc) {
-        this.speculativeExecutionPlc = plc;
+        speculativeExecutionPlc = plc;
 
         invalidate();
     }
@@ -384,7 +402,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setAuthProvider(AuthProvider provider) {
-        this.authProvider = provider;
+        authProvider = provider;
 
         invalidate();
     }
@@ -396,7 +414,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setSslOptions(SSLOptions options) {
-        this.sslOptions = options;
+        sslOptions = options;
 
         invalidate();
     }
@@ -408,7 +426,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setPoolingOptions(PoolingOptions options) {
-        this.poolingOptions = options;
+        poolingOptions = options;
 
         invalidate();
     }
@@ -420,7 +438,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setSocketOptions(SocketOptions options) {
-        this.sockOptions = options;
+        sockOptions = options;
 
         invalidate();
     }
@@ -432,7 +450,7 @@
      */
     @SuppressWarnings("UnusedDeclaration")
     public void setNettyOptions(NettyOptions options) {
-        this.nettyOptions = options;
+        nettyOptions = options;
 
         invalidate();
     }
@@ -522,6 +540,85 @@
         return ses = new CassandraSessionImpl(builder, fetchSize, readConsistency, writeConsistency, log);
     }
 
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        out.writeObject(fetchSize);
+        out.writeObject(readConsistency);
+        out.writeObject(writeConsistency);
+        U.writeString(out, user);
+        U.writeString(out, pwd);
+        out.writeObject(port);
+        out.writeObject(contactPoints);
+        out.writeObject(contactPointsWithPorts);
+        out.writeObject(maxSchemaAgreementWaitSeconds);
+        out.writeObject(protoVer);
+        U.writeString(out, compression);
+        out.writeObject(useSSL);
+        out.writeObject(collectMetrix);
+        out.writeObject(jmxReporting);
+        out.writeObject(creds);
+        writeObject(out, loadBalancingPlc);
+        writeObject(out, reconnectionPlc);
+        writeObject(out, addrTranslator);
+        writeObject(out, speculativeExecutionPlc);
+        writeObject(out, authProvider);
+        writeObject(out, sslOptions);
+        writeObject(out, poolingOptions);
+        writeObject(out, sockOptions);
+        writeObject(out, nettyOptions);
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        fetchSize = (Integer)in.readObject();
+        readConsistency = (ConsistencyLevel)in.readObject();
+        writeConsistency = (ConsistencyLevel)in.readObject();
+        user = U.readString(in);
+        pwd = U.readString(in);
+        port = (Integer)in.readObject();
+        contactPoints = (List<InetAddress>)in.readObject();
+        contactPointsWithPorts = (List<InetSocketAddress>)in.readObject();
+        maxSchemaAgreementWaitSeconds = (Integer)in.readObject();
+        protoVer = (Integer)in.readObject();
+        compression = U.readString(in);
+        useSSL = (Boolean)in.readObject();
+        collectMetrix = (Boolean)in.readObject();
+        jmxReporting = (Boolean)in.readObject();
+        creds = (Credentials)in.readObject();
+        loadBalancingPlc = (LoadBalancingPolicy)readObject(in);
+        reconnectionPlc = (ReconnectionPolicy)readObject(in);
+        addrTranslator = (AddressTranslator)readObject(in);
+        speculativeExecutionPlc = (SpeculativeExecutionPolicy)readObject(in);
+        authProvider = (AuthProvider)readObject(in);
+        sslOptions = (SSLOptions)readObject(in);
+        poolingOptions = (PoolingOptions)readObject(in);
+        sockOptions = (SocketOptions)readObject(in);
+        nettyOptions = (NettyOptions)readObject(in);
+    }
+
+    /**
+     * Helper method used to serialize class members
+     * @param out the stream to write the object to
+     * @param obj the object to be written
+     * @throws IOException Includes any I/O exceptions that may occur
+     */
+    private void writeObject(ObjectOutput out, Object obj) throws IOException {
+        out.writeObject(obj == null || !(obj instanceof Serializable) ? NULL_OBJECT : obj);
+    }
+
+    /**
+     * Helper method used to deserialize class members
+     * @param in the stream to read data from in order to restore the object
+     * @throws IOException Includes any I/O exceptions that may occur
+     * @throws ClassNotFoundException If the class for an object being restored cannot be found
+     * @return deserialized object
+     */
+    private Object readObject(ObjectInput in) throws IOException, ClassNotFoundException {
+        Object obj = in.readObject();
+        return NULL_OBJECT.equals(obj) ? null : obj;
+    }
+
     /**
      * Parses consistency level provided as string.
      *
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java
similarity index 95%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java
index 9d0710e..46ebdc5 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java
@@ -21,6 +21,9 @@
  * Simple implementation of {@link Credentials} which just uses its constructor to hold user/password values.
  */
 public class PlainCredentials implements Credentials {
+    /** */
+    private static final long serialVersionUID = 0L;
+
     /** User name. */
     private String user;
 
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/package-info.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/package-info.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/package-info.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/package-info.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/package-info.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/package-info.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java
similarity index 79%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java
index 393dbe4..c614abf 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java
@@ -20,7 +20,10 @@
 import java.beans.PropertyDescriptor;
 import java.util.LinkedList;
 import java.util.List;
+
+import org.apache.ignite.IgniteException;
 import org.apache.ignite.cache.affinity.AffinityKeyMapped;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
 import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
@@ -55,8 +58,11 @@
     public KeyPersistenceSettings(Element el) {
         super(el);
 
-        if (!PersistenceStrategy.POJO.equals(getStrategy()))
+        if (PersistenceStrategy.POJO != getStrategy()) {
+            init();
+
             return;
+        }
 
         NodeList keyElem = el.getElementsByTagName(PARTITION_KEY_ELEMENT);
 
@@ -84,6 +90,8 @@
         fields.addAll(clusterKeyFields);
 
         checkDuplicates(fields);
+
+        init();
     }
 
     /** {@inheritDoc} */
@@ -104,7 +112,7 @@
             if (partKey.length() != 0)
                 partKey.append(", ");
 
-            partKey.append(column);
+            partKey.append("\"").append(column).append("\"");
         }
 
         StringBuilder clusterKey = new StringBuilder();
@@ -115,13 +123,13 @@
                 if (clusterKey.length() != 0)
                     clusterKey.append(", ");
 
-                clusterKey.append(column);
+                clusterKey.append("\"").append(column).append("\"");
             }
         }
 
         return clusterKey.length() == 0 ?
-            "  primary key ((" + partKey.toString() + "))" :
-            "  primary key ((" + partKey.toString() + "), " + clusterKey.toString() + ")";
+            "  primary key ((" + partKey + "))" :
+            "  primary key ((" + partKey + "), " + clusterKey + ")";
     }
 
     /**
@@ -141,12 +149,12 @@
             if (builder.length() != 0)
                 builder.append(", ");
 
-            boolean asc = PojoKeyField.SortOrder.ASC.equals(sortOrder);
+            boolean asc = PojoKeyField.SortOrder.ASC == sortOrder;
 
-            builder.append(field.getColumn()).append(" ").append(asc ? "asc" : "desc");
+            builder.append("\"").append(field.getColumn()).append("\" ").append(asc ? "asc" : "desc");
         }
 
-        return builder.length() == 0 ? null : "clustering order by (" + builder.toString() + ")";
+        return builder.length() == 0 ? null : "clustering order by (" + builder + ")";
     }
 
     /** {@inheritDoc} */
@@ -162,7 +170,7 @@
     private List<String> getPartitionKeyColumns() {
         List<String> cols = new LinkedList<>();
 
-        if (PersistenceStrategy.BLOB.equals(getStrategy()) || PersistenceStrategy.PRIMITIVE.equals(getStrategy())) {
+        if (PersistenceStrategy.BLOB == getStrategy() || PersistenceStrategy.PRIMITIVE == getStrategy()) {
             cols.add(getColumn());
             return cols;
         }
@@ -205,8 +213,15 @@
             return list;
 
         if (el == null) {
-            for (PropertyDescriptor descriptor : descriptors)
-                list.add(new PojoKeyField(descriptor));
+            for (PropertyDescriptor desc : descriptors) {
+                boolean valid = desc.getWriteMethod() != null ||
+                        desc.getReadMethod().getAnnotation(QuerySqlField.class) != null ||
+                        desc.getReadMethod().getAnnotation(AffinityKeyMapped.class) != null;
+
+                // Skip POJO field if it's read-only and is not annotated with @QuerySqlField or @AffinityKeyMapped.
+                if (valid)
+                    list.add(new PojoKeyField(desc));
+            }
 
             return list;
         }
@@ -217,7 +232,7 @@
 
         if (cnt == 0) {
             throw new IllegalArgumentException("Incorrect configuration of Cassandra key persistence settings, " +
-                "no cluster key fields specified inside '" + PARTITION_KEY_ELEMENT + "/" +
+                "no key fields specified inside '" + PARTITION_KEY_ELEMENT + "/" +
                 CLUSTER_KEY_ELEMENT + "' element");
         }
 
@@ -244,9 +259,25 @@
         List<PropertyDescriptor> primitivePropDescriptors = PropertyMappingHelper.getPojoPropertyDescriptors(getJavaClass(),
             AffinityKeyMapped.class, true);
 
-        return primitivePropDescriptors != null && !primitivePropDescriptors.isEmpty() ?
-            primitivePropDescriptors :
-            PropertyMappingHelper.getPojoPropertyDescriptors(getJavaClass(), true);
+        primitivePropDescriptors = primitivePropDescriptors != null && !primitivePropDescriptors.isEmpty() ?
+            primitivePropDescriptors : PropertyMappingHelper.getPojoPropertyDescriptors(getJavaClass(), true);
+
+        boolean valid = false;
+
+        for (PropertyDescriptor desc : primitivePropDescriptors) {
+            if (desc.getWriteMethod() != null) {
+                valid = true;
+
+                break;
+            }
+        }
+
+        if (!valid) {
+            throw new IgniteException("Partition key can't have only calculated read-only fields, there should be " +
+                    "some fields with setter method");
+        }
+
+        return primitivePropDescriptors;
     }
 
     /**
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java
similarity index 78%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java
index 2c43ed4..cb968b5 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java
@@ -25,11 +25,15 @@
 import java.io.InputStreamReader;
 import java.io.Serializable;
 import java.io.StringReader;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
+import java.util.Collections;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.store.cassandra.common.CassandraHelper;
 import org.apache.ignite.cache.store.cassandra.common.SystemHelper;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.springframework.core.io.Resource;
@@ -101,6 +105,9 @@
     /** Persistence settings for Ignite cache values. */
     private ValuePersistenceSettings valPersistenceSettings;
 
+    /** List of Cassandra table columns. */
+    private List<String> tableColumns;
+
     /**
      * Constructs Ignite cache key/value persistence settings.
      *
@@ -177,16 +184,6 @@
     }
 
     /**
-     * Returns full name of Cassandra table to use (including keyspace).
-     *
-     * @return full table name in format "keyspace.table".
-     */
-    public String getTableFullName()
-    {
-        return keyspace + "." + tbl;
-    }
-
-    /**
      * Returns persistence settings for Ignite cache keys.
      *
      * @return keys persistence settings.
@@ -249,7 +246,7 @@
      */
     public String getKeyspaceDDLStatement() {
         StringBuilder builder = new StringBuilder();
-        builder.append("create keyspace if not exists ").append(keyspace);
+        builder.append("create keyspace if not exists \"").append(keyspace).append("\"");
 
         if (keyspaceOptions != null) {
             if (!keyspaceOptions.trim().toLowerCase().startsWith("with"))
@@ -264,12 +261,31 @@
     }
 
     /**
+     * Returns column names for Cassandra table.
+     *
+     * @return Column names.
+     */
+    public List<String> getTableColumns() {
+        return tableColumns;
+    }
+
+    /**
      * Returns DDL statement to create Cassandra table.
      *
+     * @param table Table name.
      * @return Table DDL statement.
      */
-    public String getTableDDLStatement() {
-        String colsDDL = keyPersistenceSettings.getTableColumnsDDL() + ",\n" + valPersistenceSettings.getTableColumnsDDL();
+    public String getTableDDLStatement(String table) {
+        if (table == null || table.trim().isEmpty())
+            throw new IllegalArgumentException("Table name should be specified");
+
+        String keyColumnsDDL = keyPersistenceSettings.getTableColumnsDDL();
+        String valColumnsDDL = valPersistenceSettings.getTableColumnsDDL(new HashSet<>(keyPersistenceSettings.getTableColumns()));
+
+        String colsDDL = keyColumnsDDL;
+
+        if (valColumnsDDL != null && !valColumnsDDL.trim().isEmpty())
+            colsDDL += ",\n" + valColumnsDDL;
 
         String primaryKeyDDL = keyPersistenceSettings.getPrimaryKeyDDL();
 
@@ -285,7 +301,7 @@
 
         StringBuilder builder = new StringBuilder();
 
-        builder.append("create table if not exists ").append(keyspace).append(".").append(tbl);
+        builder.append("create table if not exists \"").append(keyspace).append("\".\"").append(table).append("\"");
         builder.append("\n(\n").append(colsDDL).append(",\n").append(primaryKeyDDL).append("\n)");
 
         if (!optionsDDL.isEmpty())
@@ -299,16 +315,18 @@
     /**
      * Returns DDL statements to create Cassandra table secondary indexes.
      *
+     * @param table Table name.
      * @return DDL statements to create secondary indexes.
      */
-    public List<String> getIndexDDLStatements() {
+    public List<String> getIndexDDLStatements(String table) {
         List<String> idxDDLs = new LinkedList<>();
 
+        Set<String> keyColumns = new HashSet<>(keyPersistenceSettings.getTableColumns());
         List<PojoField> fields = valPersistenceSettings.getFields();
 
         for (PojoField field : fields) {
-            if (((PojoValueField)field).isIndexed())
-                idxDDLs.add(((PojoValueField)field).getIndexDDL(keyspace, tbl));
+            if (!keyColumns.contains(field.getColumn()) && ((PojoValueField)field).isIndexed())
+                idxDDLs.add(((PojoValueField)field).getIndexDDL(keyspace, table));
         }
 
         return idxDDLs;
@@ -360,7 +378,7 @@
         try {
             return Integer.parseInt(val);
         }
-        catch (NumberFormatException e) {
+        catch (NumberFormatException ignored) {
             throw new IllegalArgumentException("Incorrect value '" + val + "' specified for '" + attr + "' attribute");
         }
     }
@@ -396,13 +414,8 @@
                 "' attribute should be specified");
         }
 
-        if (!root.hasAttribute(TABLE_ATTR)) {
-            throw new IllegalArgumentException("Incorrect persistence settings '" + TABLE_ATTR +
-                "' attribute should be specified");
-        }
-
         keyspace = root.getAttribute(KEYSPACE_ATTR).trim();
-        tbl = root.getAttribute(TABLE_ATTR).trim();
+        tbl = root.hasAttribute(TABLE_ATTR) ? root.getAttribute(TABLE_ATTR).trim() : null;
 
         if (root.hasAttribute(TTL_ATTR))
             ttl = extractIntAttribute(root, TTL_ATTR);
@@ -451,28 +464,63 @@
         List<PojoField> keyFields = keyPersistenceSettings.getFields();
         List<PojoField> valFields = valPersistenceSettings.getFields();
 
-        if (PersistenceStrategy.POJO.equals(keyPersistenceSettings.getStrategy()) &&
+        if (PersistenceStrategy.POJO == keyPersistenceSettings.getStrategy() &&
             (keyFields == null || keyFields.isEmpty())) {
             throw new IllegalArgumentException("Incorrect Cassandra persistence settings specification, " +
                 "there are no key fields found");
         }
 
-        if (PersistenceStrategy.POJO.equals(valPersistenceSettings.getStrategy()) &&
+        if (PersistenceStrategy.POJO == valPersistenceSettings.getStrategy() &&
             (valFields == null || valFields.isEmpty())) {
             throw new IllegalArgumentException("Incorrect Cassandra persistence settings specification, " +
                 "there are no value fields found");
         }
 
-        if (keyFields == null || keyFields.isEmpty() || valFields == null || valFields.isEmpty())
-            return;
+        // Validating aliases compatibility - fields having different names, but mapped to the same Cassandra table column.
+        if (valFields != null && !valFields.isEmpty()) {
+            String keyColumn = keyPersistenceSettings.getColumn();
+            Class keyClass = keyPersistenceSettings.getJavaClass();
 
-        for (PojoField keyField : keyFields) {
-            for (PojoField valField : valFields) {
-                if (keyField.getColumn().equals(valField.getColumn())) {
-                    throw new IllegalArgumentException("Incorrect Cassandra persistence settings specification, " +
-                        "key column '" + keyField.getColumn() + "' also specified as a value column");
+            if (keyColumn != null && !keyColumn.isEmpty()) {
+                for (PojoField valField : valFields) {
+                    if (keyColumn.equals(valField.getColumn()) &&
+                            !CassandraHelper.isCassandraCompatibleTypes(keyClass, valField.getJavaClass())) {
+                        throw new IllegalArgumentException("Value field '" + valField.getName() + "' shares the same " +
+                                "Cassandra table column '" + keyColumn + "' with key, but their Java classes are " +
+                                "different. Fields sharing the same column should have the same Java class as their " +
+                                "type or should be mapped to the same Cassandra primitive type.");
+                    }
+                }
+            }
+
+            if (keyFields != null && !keyFields.isEmpty()) {
+                for (PojoField keyField : keyFields) {
+                    for (PojoField valField : valFields) {
+                        if (keyField.getColumn().equals(valField.getColumn()) &&
+                                !CassandraHelper.isCassandraCompatibleTypes(keyField.getJavaClass(), valField.getJavaClass())) {
+                            throw new IllegalArgumentException("Value field '" + valField.getName() + "' shares the same " +
+                                    "Cassandra table column '" + keyColumn + "' with key field '" + keyField.getName() + "', " +
+                                    "but their Java classes are different. Fields sharing the same column should have " +
+                                    "the same Java class as their type or should be mapped to the same Cassandra " +
+                                    "primitive type.");
+                        }
+                    }
                 }
             }
         }
+
+        tableColumns = new LinkedList<>();
+
+        for (String column : keyPersistenceSettings.getTableColumns()) {
+            if (!tableColumns.contains(column))
+                tableColumns.add(column);
+        }
+
+        for (String column : valPersistenceSettings.getTableColumns()) {
+            if (!tableColumns.contains(column))
+                tableColumns.add(column);
+        }
+
+        tableColumns = Collections.unmodifiableList(tableColumns);
     }
 }
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java
new file mode 100644
index 0000000..e287a4e
--- /dev/null
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java
@@ -0,0 +1,462 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.cache.store.cassandra.persistence;
+
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.Row;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
+import org.apache.ignite.cache.store.cassandra.serializer.Serializer;
+
+/**
+ * Intermediate layer between persistent store (Cassandra) and Ignite cache key/value classes.
+ * Handles all the mappings of Java classes to/from Cassandra and is responsible for all the details
+ * of how Java objects should be written/loaded to/from Cassandra.
+ */
+public class PersistenceController {
+    /** Ignite cache key/value persistence settings. */
+    private final KeyValuePersistenceSettings persistenceSettings;
+
+    /** List of key unique POJO fields (skipping aliases pointing to the same Cassandra table column). */
+    private final List<PojoField> keyUniquePojoFields;
+
+    /** List of value unique POJO fields (skipping aliases pointing to the same Cassandra table column). */
+    private final List<PojoField> valUniquePojoFields;
+
+    /** CQL statement template to insert row into Cassandra table. */
+    private final String writeStatementTempl;
+
+    /** CQL statement template to delete row from Cassandra table. */
+    private final String delStatementTempl;
+
+    /** CQL statement template to select value fields from Cassandra table. */
+    private final String loadStatementTempl;
+
+    /** CQL statement template to select key/value fields from Cassandra table. */
+    private final String loadWithKeyFieldsStatementTempl;
+
+    /** CQL statements to insert row into Cassandra table. */
+    private volatile Map<String, String> writeStatements = new HashMap<>();
+
+    /** CQL statements to delete row from Cassandra table. */
+    private volatile Map<String, String> delStatements = new HashMap<>();
+
+    /** CQL statements to select value fields from Cassandra table. */
+    private volatile Map<String, String> loadStatements = new HashMap<>();
+
+    /** CQL statements to select key/value fields from Cassandra table. */
+    private volatile Map<String, String> loadWithKeyFieldsStatements = new HashMap<>();
+
+    /**
+     * Constructs persistence controller from Ignite cache persistence settings.
+     *
+     * @param settings persistence settings.
+     */
+    public PersistenceController(KeyValuePersistenceSettings settings) {
+        if (settings == null)
+            throw new IllegalArgumentException("Persistent settings can't be null");
+
+        persistenceSettings = settings;
+
+        String[] loadStatements = prepareLoadStatements();
+
+        loadWithKeyFieldsStatementTempl = loadStatements[0];
+        loadStatementTempl = loadStatements[1];
+        writeStatementTempl = prepareWriteStatement();
+        delStatementTempl = prepareDeleteStatement();
+
+        keyUniquePojoFields = settings.getKeyPersistenceSettings().cassandraUniqueFields();
+
+        List<PojoField> _valUniquePojoFields = settings.getValuePersistenceSettings().cassandraUniqueFields();
+
+        if (_valUniquePojoFields == null || _valUniquePojoFields.isEmpty()) {
+            valUniquePojoFields = _valUniquePojoFields;
+
+            return;
+        }
+
+        List<String> keyColumns = new LinkedList<>();
+
+        if (keyUniquePojoFields == null)
+            keyColumns.add(settings.getKeyPersistenceSettings().getColumn());
+        else {
+            for (PojoField field : keyUniquePojoFields)
+                keyColumns.add(field.getColumn());
+        }
+
+        List<PojoField> fields = new LinkedList<>(_valUniquePojoFields);
+
+        for (String column : keyColumns) {
+            for (int i = 0; i < fields.size(); i++) {
+                if (column.equals(fields.get(i).getColumn())) {
+                    fields.remove(i);
+                    break;
+                }
+            }
+        }
+
+        valUniquePojoFields = fields.isEmpty() ? null : Collections.unmodifiableList(fields);
+    }
+
+    /**
+     * Returns Ignite cache persistence settings.
+     *
+     * @return persistence settings.
+     */
+    public KeyValuePersistenceSettings getPersistenceSettings() {
+        return persistenceSettings;
+    }
+
+    /**
+     * Returns CQL statement to insert row into Cassandra table.
+     *
+     * @param table Table name.
+     * @return CQL statement.
+     */
+    public String getWriteStatement(String table) {
+        return getStatement(table, writeStatementTempl, writeStatements);
+    }
+
+    /**
+     * Returns CQL statement to delete row from Cassandra table.
+     *
+     * @param table Table name.
+     * @return CQL statement.
+     */
+    public String getDeleteStatement(String table) {
+        return getStatement(table, delStatementTempl, delStatements);
+    }
+
+    /**
+     * Returns CQL statement to select key/value fields from Cassandra table.
+     *
+     * @param table Table name.
+     * @param includeKeyFields whether to include/exclude key fields from the returned row.
+     *
+     * @return CQL statement.
+     */
+    public String getLoadStatement(String table, boolean includeKeyFields) {
+        return includeKeyFields ?
+            getStatement(table, loadWithKeyFieldsStatementTempl, loadWithKeyFieldsStatements) :
+            getStatement(table, loadStatementTempl, loadStatements);
+    }
+
+    /**
+     * Binds Ignite cache key object to {@link PreparedStatement}.
+     *
+     * @param statement statement to which the key object should be bound.
+     * @param key key object.
+     *
+     * @return statement with bound key.
+     */
+    public BoundStatement bindKey(PreparedStatement statement, Object key) {
+        PersistenceSettings settings = persistenceSettings.getKeyPersistenceSettings();
+
+        Object[] values = PersistenceStrategy.POJO != settings.getStrategy() ?
+            new Object[1] : new Object[keyUniquePojoFields.size()];
+
+        bindValues(settings.getStrategy(), settings.getSerializer(), keyUniquePojoFields, key, values, 0);
+
+        return statement.bind(values);
+    }
+
+    /**
+     * Binds Ignite cache key and value object to {@link com.datastax.driver.core.PreparedStatement}.
+     *
+     * @param statement statement to which the key and value objects should be bound.
+     * @param key key object.
+     * @param val value object.
+     *
+     * @return statement with bound key and value.
+     */
+    public BoundStatement bindKeyValue(PreparedStatement statement, Object key, Object val) {
+        Object[] values = new Object[persistenceSettings.getTableColumns().size()];
+
+        PersistenceSettings keySettings = persistenceSettings.getKeyPersistenceSettings();
+        PersistenceSettings valSettings = persistenceSettings.getValuePersistenceSettings();
+
+        int offset = bindValues(keySettings.getStrategy(), keySettings.getSerializer(), keyUniquePojoFields, key, values, 0);
+        bindValues(valSettings.getStrategy(), valSettings.getSerializer(), valUniquePojoFields, val, values, offset);
+
+        return statement.bind(values);
+    }
+
+    /**
+     * Builds Ignite cache key object from returned Cassandra table row.
+     *
+     * @param row Cassandra table row.
+     *
+     * @return key object.
+     */
+    @SuppressWarnings("UnusedDeclaration")
+    public Object buildKeyObject(Row row) {
+        return buildObject(row, persistenceSettings.getKeyPersistenceSettings());
+    }
+
+    /**
+     * Builds Ignite cache value object from Cassandra table row.
+     *
+     * @param row Cassandra table row.
+     *
+     * @return value object.
+     */
+    public Object buildValueObject(Row row) {
+        return buildObject(row, persistenceSettings.getValuePersistenceSettings());
+    }
+
+    /**
+     * Service method to prepare CQL write statement.
+     *
+     * @return CQL write statement.
+     */
+    private String prepareWriteStatement() {
+        Collection<String> cols = persistenceSettings.getTableColumns();
+
+        StringBuilder colsList = new StringBuilder();
+        StringBuilder questionsList = new StringBuilder();
+
+        for (String column : cols) {
+            if (colsList.length() != 0) {
+                colsList.append(", ");
+                questionsList.append(",");
+            }
+
+            colsList.append("\"").append(column).append("\"");
+            questionsList.append("?");
+        }
+
+        String statement = "insert into \"" + persistenceSettings.getKeyspace() + "\".\"%1$s" +
+            "\" (" + colsList + ") values (" + questionsList + ")";
+
+        if (persistenceSettings.getTTL() != null)
+            statement += " using ttl " + persistenceSettings.getTTL();
+
+        return statement + ";";
+    }
+
+    /**
+     * Service method to prepare CQL delete statement.
+     *
+     * @return CQL delete statement.
+     */
+    private String prepareDeleteStatement() {
+        Collection<String> cols = persistenceSettings.getKeyPersistenceSettings().getTableColumns();
+
+        StringBuilder statement = new StringBuilder();
+
+        for (String column : cols) {
+            if (statement.length() != 0)
+                statement.append(" and ");
+
+            statement.append("\"").append(column).append("\"=?");
+        }
+
+        statement.append(";");
+
+        return "delete from \"" + persistenceSettings.getKeyspace() + "\".\"%1$s\" where " + statement;
+    }
+
+    /**
+     * Service method to prepare CQL load statements including and excluding key columns.
+     *
+     * @return array having two CQL statements (including and excluding key columns).
+     */
+    private String[] prepareLoadStatements() {
+        PersistenceSettings settings = persistenceSettings.getKeyPersistenceSettings();
+        boolean pojoStrategy = PersistenceStrategy.POJO == settings.getStrategy();
+        Collection<String> keyCols = settings.getTableColumns();
+        StringBuilder hdrWithKeyFields = new StringBuilder();
+
+
+        for (String column : keyCols) {
+            // omit calculated fields in load statement
+            if (pojoStrategy && settings.getFieldByColumn(column).calculatedField())
+                continue;
+
+            if (hdrWithKeyFields.length() > 0)
+                hdrWithKeyFields.append(", ");
+
+            hdrWithKeyFields.append("\"").append(column).append("\"");
+        }
+
+        settings = persistenceSettings.getValuePersistenceSettings();
+        pojoStrategy = PersistenceStrategy.POJO == settings.getStrategy();
+        Collection<String> valCols = settings.getTableColumns();
+        StringBuilder hdr = new StringBuilder();
+
+        for (String column : valCols) {
+            // omit calculated fields in load statement
+            if (pojoStrategy && settings.getFieldByColumn(column).calculatedField())
+                continue;
+
+            if (hdr.length() > 0)
+                hdr.append(", ");
+
+            hdr.append("\"").append(column).append("\"");
+
+            if (!keyCols.contains(column))
+                hdrWithKeyFields.append(", \"").append(column).append("\"");
+        }
+
+        hdrWithKeyFields.insert(0, "select ");
+        hdr.insert(0, "select ");
+
+        StringBuilder statement = new StringBuilder();
+
+        statement.append(" from \"");
+        statement.append(persistenceSettings.getKeyspace());
+        statement.append("\".\"%1$s");
+        statement.append("\" where ");
+
+        int i = 0;
+
+        for (String column : keyCols) {
+            if (i > 0)
+                statement.append(" and ");
+
+            statement.append("\"").append(column).append("\"=?");
+            i++;
+        }
+
+        statement.append(";");
+
+        return new String[] {hdrWithKeyFields + statement.toString(), hdr + statement.toString()};
+    }
+
+    /**
+     * @param table Table.
+     * @param template Template.
+     * @param statements Statements.
+     * @return Statement.
+     */
+    private String getStatement(final String table, final String template, final Map<String, String> statements) {
+        //noinspection SynchronizationOnLocalVariableOrMethodParameter
+        synchronized (statements) {
+            String st = statements.get(table);
+
+            if (st == null) {
+                st = String.format(template, table);
+                statements.put(table, st);
+            }
+
+            return st;
+        }
+    }
+
+    /**
+     * Builds object from Cassandra table row.
+     *
+     * @param row Cassandra table row.
+     * @param settings persistence settings to use.
+     *
+     * @return object.
+     */
+    private Object buildObject(Row row, PersistenceSettings settings) {
+        if (row == null)
+            return null;
+
+        PersistenceStrategy stg = settings.getStrategy();
+
+        Class clazz = settings.getJavaClass();
+        String col = settings.getColumn();
+
+        if (PersistenceStrategy.PRIMITIVE == stg)
+            return PropertyMappingHelper.getCassandraColumnValue(row, col, clazz, null);
+
+        if (PersistenceStrategy.BLOB == stg)
+            return settings.getSerializer().deserialize(row.getBytes(col));
+
+        List<PojoField> fields = settings.getFields();
+
+        Object obj;
+
+        try {
+            obj = clazz.newInstance();
+        }
+        catch (Throwable e) {
+            throw new IgniteException("Failed to instantiate object of type '" + clazz.getName() + "' using reflection", e);
+        }
+
+        for (PojoField field : fields) {
+            if (!field.calculatedField())
+                field.setValueFromRow(row, obj, settings.getSerializer());
+        }
+
+        return obj;
+    }
+
+    /**
+     * Extracts field values from POJO object, converts into Java types
+     * which could be mapped to Cassandra types and stores them inside provided values
+     * array starting from specified offset.
+     *
+     * @param stgy Persistence strategy to use.
+     * @param serializer Serializer to use for BLOBs.
+     * @param fields Fields whose values should be extracted.
+     * @param obj Object instance whose field values should be extracted.
+     * @param values Array to store values.
+     * @param offset Offset starting from which to store fields values in the provided values array.
+     *
+     * @return next offset
+     */
+    private int bindValues(PersistenceStrategy stgy, Serializer serializer, List<PojoField> fields, Object obj,
+                            Object[] values, int offset) {
+        if (PersistenceStrategy.PRIMITIVE == stgy) {
+            if (PropertyMappingHelper.getCassandraType(obj.getClass()) == null ||
+                obj.getClass().equals(ByteBuffer.class) || obj instanceof byte[]) {
+                throw new IllegalArgumentException("Couldn't deserialize instance of class '" +
+                    obj.getClass().getName() + "' using PRIMITIVE strategy. Please use BLOB strategy for this case.");
+            }
+
+            values[offset] = obj;
+
+            return ++offset;
+        }
+
+        if (PersistenceStrategy.BLOB == stgy) {
+            values[offset] = serializer.serialize(obj);
+
+            return ++offset;
+        }
+
+        if (fields == null || fields.isEmpty())
+            return offset;
+
+        for (PojoField field : fields) {
+            Object val = field.getValueFromObject(obj, serializer);
+
+            if (val instanceof byte[])
+                val = ByteBuffer.wrap((byte[]) val);
+
+            values[offset] = val;
+
+            offset++;
+        }
+
+        return offset;
+    }
+}
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java
similarity index 68%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java
index 20d790a..f22c0a4 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java
@@ -21,8 +21,14 @@
 import java.beans.PropertyDescriptor;
 import java.io.Serializable;
 import java.nio.ByteBuffer;
+import java.util.HashSet;
+import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
+import java.util.Collections;
+
 import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.store.cassandra.common.CassandraHelper;
 import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
 import org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer;
 import org.apache.ignite.cache.store.cassandra.serializer.Serializer;
@@ -58,6 +64,15 @@
     /** Serializer for BLOBs. */
     private Serializer serializer = new JavaSerializer();
 
+    /** List of Cassandra table columns. */
+    private List<String> tableColumns;
+
+    /**
+     * List of POJO fields having unique mapping to Cassandra columns - skipping aliases pointing
+     *  to the same Cassandra table column.
+     */
+    private List<PojoField> casUniqueFields;
+
     /**
      * Extracts property descriptor from the descriptors list by its name.
      *
@@ -96,11 +111,11 @@
         try {
             stgy = PersistenceStrategy.valueOf(el.getAttribute(STRATEGY_ATTR).trim().toUpperCase());
         }
-        catch (IllegalArgumentException e) {
+        catch (IllegalArgumentException ignored) {
             throw new IllegalArgumentException("Incorrect persistence strategy specified: " + el.getAttribute(STRATEGY_ATTR));
         }
 
-        if (!el.hasAttribute(CLASS_ATTR) && !PersistenceStrategy.BLOB.equals(stgy)) {
+        if (!el.hasAttribute(CLASS_ATTR) && PersistenceStrategy.BLOB != stgy) {
             throw new IllegalArgumentException("DOM element representing key/value persistence object should have '" +
                 CLASS_ATTR + "' attribute or have BLOB persistence strategy");
         }
@@ -113,19 +128,19 @@
                 "for Cassandra persistence", e);
         }
 
-        if (!PersistenceStrategy.BLOB.equals(stgy) &&
+        if (PersistenceStrategy.BLOB != stgy &&
             (ByteBuffer.class.equals(javaCls) || byte[].class.equals(javaCls))) {
             throw new IllegalArgumentException("Java class '" + el.getAttribute(CLASS_ATTR) + "' " +
                 "specified could only be persisted using BLOB persistence strategy");
         }
 
-        if (PersistenceStrategy.PRIMITIVE.equals(stgy) &&
+        if (PersistenceStrategy.PRIMITIVE == stgy &&
             PropertyMappingHelper.getCassandraType(javaCls) == null) {
             throw new IllegalArgumentException("Current implementation doesn't support persisting '" +
                 javaCls.getName() + "' object using PRIMITIVE strategy");
         }
 
-        if (PersistenceStrategy.POJO.equals(stgy)) {
+        if (PersistenceStrategy.POJO == stgy) {
             if (javaCls == null)
                 throw new IllegalStateException("Object java class should be specified for POJO persistence strategy");
 
@@ -139,7 +154,7 @@
         }
 
         if (el.hasAttribute(COLUMN_ATTR)) {
-            if (!PersistenceStrategy.BLOB.equals(stgy) && !PersistenceStrategy.PRIMITIVE.equals(stgy)) {
+            if (PersistenceStrategy.BLOB != stgy && PersistenceStrategy.PRIMITIVE != stgy) {
                 throw new IllegalArgumentException("Incorrect configuration of Cassandra key/value persistence settings, " +
                     "'" + COLUMN_ATTR + "' attribute is only applicable for PRIMITIVE or BLOB strategy");
             }
@@ -148,7 +163,7 @@
         }
 
         if (el.hasAttribute(SERIALIZER_ATTR)) {
-            if (!PersistenceStrategy.BLOB.equals(stgy) && !PersistenceStrategy.POJO.equals(stgy)) {
+            if (PersistenceStrategy.BLOB != stgy && PersistenceStrategy.POJO != stgy) {
                 throw new IllegalArgumentException("Incorrect configuration of Cassandra key/value persistence settings, " +
                     "'" + SERIALIZER_ATTR + "' attribute is only applicable for BLOB and POJO strategies");
             }
@@ -164,7 +179,7 @@
             serializer = (Serializer)obj;
         }
 
-        if ((PersistenceStrategy.BLOB.equals(stgy) || PersistenceStrategy.PRIMITIVE.equals(stgy)) && col == null)
+        if ((PersistenceStrategy.BLOB == stgy || PersistenceStrategy.PRIMITIVE == stgy) && col == null)
             col = defaultColumnName();
     }
 
@@ -206,36 +221,99 @@
     }
 
     /**
-     * Returns list of POJO fields to be persisted.
+     * Returns a list of POJO fields to be persisted.
      *
      * @return list of fields.
      */
     public abstract List<PojoField> getFields();
 
     /**
+     * Returns POJO field by Cassandra table column name.
+     *
+     * @param column column name.
+     *
+     * @return POJO field or null if it doesn't exist.
+     */
+    public PojoField getFieldByColumn(String column) {
+        List<PojoField> fields = getFields();
+
+        if (fields == null || fields.isEmpty())
+            return null;
+
+        for (PojoField field : fields) {
+            if (field.getColumn().equals(column))
+                return field;
+        }
+
+        return null;
+    }
+
+    /**
+     * List of POJO fields having unique mapping to Cassandra columns - skipping aliases pointing
+     * to the same Cassandra table column.
+     *
+     * @return List of fields.
+     */
+    public List<PojoField> cassandraUniqueFields() {
+        return casUniqueFields;
+    }
+
+    /**
+     * Returns a list of Cassandra table column names used to persist field values.
+     *
+     * @return List of column names.
+     */
+    public List<String> getTableColumns() {
+        return tableColumns;
+    }
+
+    /**
      * Returns Cassandra table columns DDL, corresponding to POJO fields which should be persisted.
      *
-     * @return DDL statement for Cassandra table fields
+     * @return DDL statement for Cassandra table fields.
      */
     public String getTableColumnsDDL() {
-        if (PersistenceStrategy.BLOB.equals(stgy))
-            return "  " + col + " " + DataType.Name.BLOB.toString();
+        return getTableColumnsDDL(null);
+    }
 
-        if (PersistenceStrategy.PRIMITIVE.equals(stgy))
-            return "  " + col + " " + PropertyMappingHelper.getCassandraType(javaCls);
+    /**
+     * Returns Cassandra table columns DDL, corresponding to POJO fields which should be persisted.
+     *
+     * @param ignoreColumns Table columns to ignore (exclude) from DDL.
+     * @return DDL statement for Cassandra table fields.
+     */
+    public String getTableColumnsDDL(Set<String> ignoreColumns) {
+        if (PersistenceStrategy.BLOB == stgy)
+            return "  \"" + col + "\" " + DataType.Name.BLOB.toString();
+
+        if (PersistenceStrategy.PRIMITIVE == stgy)
+            return "  \"" + col + "\" " + PropertyMappingHelper.getCassandraType(javaCls);
+
+        List<PojoField> fields = getFields();
+
+        if (fields == null || fields.isEmpty()) {
+            throw new IllegalStateException("There are no POJO fields found for '" + javaCls.toString()
+                + "' class to be presented as a Cassandra primary key");
+        }
+
+        // Accumulating already processed columns in the set, to prevent duplicating columns
+        // shared by two different POJO fields.
+        Set<String> processedColumns = new HashSet<>();
 
         StringBuilder builder = new StringBuilder();
 
-        for (PojoField field : getFields()) {
+        for (PojoField field : fields) {
+            if ((ignoreColumns != null && ignoreColumns.contains(field.getColumn())) ||
+                    processedColumns.contains(field.getColumn())) {
+                continue;
+            }
+
             if (builder.length() > 0)
                 builder.append(",\n");
 
             builder.append("  ").append(field.getColumnDDL());
-        }
 
-        if (builder.length() == 0) {
-            throw new IllegalStateException("There are no POJO fields found for '" + javaCls.toString()
-                + "' class to be presented as a Cassandra primary key");
+            processedColumns.add(field.getColumn());
         }
 
         return builder.toString();
@@ -249,9 +327,40 @@
     protected abstract String defaultColumnName();
 
     /**
-     * Checks if there are POJO filed with the same name or same Cassandra column specified in persistence settings
+     * Class instance initialization.
+     */
+    protected void init() {
+        if (getColumn() != null && !getColumn().trim().isEmpty()) {
+            tableColumns = new LinkedList<>();
+            tableColumns.add(getColumn());
+            tableColumns = Collections.unmodifiableList(tableColumns);
+
+            return;
+        }
+
+        List<PojoField> fields = getFields();
+
+        if (fields == null || fields.isEmpty())
+            return;
+
+        tableColumns = new LinkedList<>();
+        casUniqueFields = new LinkedList<>();
+
+        for (PojoField field : fields) {
+            if (!tableColumns.contains(field.getColumn())) {
+                tableColumns.add(field.getColumn());
+                casUniqueFields.add(field);
+            }
+        }
+
+        tableColumns = Collections.unmodifiableList(tableColumns);
+        casUniqueFields = Collections.unmodifiableList(casUniqueFields);
+    }
+
+    /**
+     * Checks if there are POJO fields with the same name or the same Cassandra column specified in persistence settings.
      *
-     * @param fields list of fields to be persisted into Cassandra
+     * @param fields List of fields to be persisted into Cassandra.
      */
     protected void checkDuplicates(List<PojoField> fields) {
         if (fields == null || fields.isEmpty())
@@ -264,7 +373,7 @@
             for (PojoField field2 : fields) {
                 if (field1.getName().equals(field2.getName())) {
                     if (sameNames) {
-                        throw new IllegalArgumentException("Incorrect Cassandra key persistence settings, " +
+                        throw new IllegalArgumentException("Incorrect Cassandra persistence settings, " +
                             "two POJO fields with the same name '" + field1.getName() + "' specified");
                     }
 
@@ -272,9 +381,11 @@
                 }
 
                 if (field1.getColumn().equals(field2.getColumn())) {
-                    if (sameCols) {
-                        throw new IllegalArgumentException("Incorrect Cassandra persistence settings, " +
-                            "two POJO fields with the same column '" + field1.getColumn() + "' specified");
+                    if (sameCols && !CassandraHelper.isCassandraCompatibleTypes(field1.getJavaClass(), field2.getJavaClass())) {
+                        throw new IllegalArgumentException("Field '" + field1.getName() + "' shares the same Cassandra table " +
+                                "column '" + field1.getColumn() + "' with field '" + field2.getName() + "', but their Java " +
+                                "classes are different. Fields sharing the same column should have the same " +
+                                "Java class as their type or should be mapped to the same Cassandra primitive type.");
                     }
 
                     sameCols = true;
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceStrategy.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceStrategy.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceStrategy.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceStrategy.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java
similarity index 79%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java
index af569fd..99b96d5 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java
@@ -21,6 +21,7 @@
 import com.datastax.driver.core.Row;
 import java.beans.PropertyDescriptor;
 import java.io.Serializable;
+import java.lang.reflect.Method;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.cache.query.annotations.QuerySqlField;
 import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
@@ -42,7 +43,7 @@
     private String name;
 
     /** Java class to which the field belongs. */
-    private Class javaCls;
+    private Class objJavaCls;
 
     /** Field column name in Cassandra table. */
     private String col;
@@ -50,6 +51,9 @@
     /** Field column DDL.  */
     private String colDDL;
 
+    /** Indicator for calculated field. */
+    private Boolean calculated;
+
     /** Field property descriptor. */
     private transient PropertyDescriptor desc;
 
@@ -82,13 +86,14 @@
     public PojoField(PropertyDescriptor desc) {
         this.name = desc.getName();
 
-        QuerySqlField sqlField = desc.getReadMethod() != null ?
-            desc.getReadMethod().getAnnotation(QuerySqlField.class) :
-            desc.getWriteMethod() == null ?
-                null :
-                desc.getWriteMethod().getAnnotation(QuerySqlField.class);
+        Method rdMthd = desc.getReadMethod();
 
-        this.col = sqlField != null && sqlField.name() != null ? sqlField.name() : name.toLowerCase();
+        QuerySqlField sqlField = rdMthd != null && rdMthd.getAnnotation(QuerySqlField.class) != null
+            ? rdMthd.getAnnotation(QuerySqlField.class)
+            : desc.getWriteMethod() == null ? null : desc.getWriteMethod().getAnnotation(QuerySqlField.class);
+
+        col = sqlField != null && sqlField.name() != null &&
+            !sqlField.name().trim().isEmpty() ? sqlField.name() : name.toLowerCase();
 
         init(desc);
 
@@ -104,6 +109,15 @@
     }
 
     /**
+     * Returns java class of the field.
+     *
+     * @return Java class.
+     */
+    public Class getJavaClass() {
+        return propDesc().getPropertyType();
+    }
+
+    /**
      * @return Cassandra table column name.
      */
     public String getColumn() {
@@ -118,6 +132,21 @@
     }
 
     /**
+     * Indicates if it's a calculated field - a field whose value is just generated based on other field values.
+     * Such a field will be stored in Cassandra like all other POJO fields, but its value shouldn't be read from
+     * Cassandra - because it's again just generated from other field values. One good application of such
+     * fields is Cassandra materialized views built on top of other tables.
+     *
+     * @return {@code true} if it's an auto-generated field, {@code false} if not.
+     */
+    public boolean calculatedField() {
+        if (calculated != null)
+            return calculated;
+
+        return calculated = propDesc().getWriteMethod() == null;
+    }
+
+    /**
      * Gets field value as an object having Cassandra compatible type.
      * This it could be stored directly into Cassandra without any conversions.
      *
@@ -158,6 +187,9 @@
      * @param serializer {@link org.apache.ignite.cache.store.cassandra.serializer.Serializer} to use.
      */
     public void setValueFromRow(Row row, Object obj, Serializer serializer) {
+        if (calculatedField())
+            return;
+
         Object val = PropertyMappingHelper.getCassandraColumnValue(row, col, propDesc().getPropertyType(), serializer);
 
         try {
@@ -188,24 +220,18 @@
                 "' doesn't provide getter method");
         }
 
-        if (desc.getWriteMethod() == null) {
-            throw new IllegalArgumentException("Field '" + desc.getName() +
-                "' of POJO object instance of the class '" + desc.getPropertyType().getName() +
-                "' doesn't provide write method");
-        }
-
         if (!desc.getReadMethod().isAccessible())
             desc.getReadMethod().setAccessible(true);
 
-        if (!desc.getWriteMethod().isAccessible())
+        if (desc.getWriteMethod() != null && !desc.getWriteMethod().isAccessible())
             desc.getWriteMethod().setAccessible(true);
 
         DataType.Name cassandraType = PropertyMappingHelper.getCassandraType(desc.getPropertyType());
         cassandraType = cassandraType == null ? DataType.Name.BLOB : cassandraType;
 
-        this.javaCls = desc.getReadMethod().getDeclaringClass();
+        this.objJavaCls = desc.getReadMethod().getDeclaringClass();
         this.desc = desc;
-        this.colDDL = col + " " + cassandraType.toString();
+        this.colDDL = "\"" + col + "\" " + cassandraType.toString();
     }
 
     /**
@@ -214,6 +240,6 @@
      * @return Property descriptor
      */
     private PropertyDescriptor propDesc() {
-        return desc != null ? desc : (desc = PropertyMappingHelper.getPojoPropertyDescriptor(javaCls, name));
+        return desc != null ? desc : (desc = PropertyMappingHelper.getPojoPropertyDescriptor(objJavaCls, name));
     }
 }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java
similarity index 91%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java
index 4e86d74..6f42db2 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java
@@ -40,7 +40,7 @@
     private static final String SORT_ATTR = "sort";
 
     /** Sort order. */
-    private SortOrder sortOrder = null;
+    private SortOrder sortOrder;
 
     /**
      * Constructs Ignite cache key POJO object descriptor.
@@ -79,12 +79,8 @@
         return sortOrder;
     }
 
-    /**
-     * Initializes descriptor from {@link QuerySqlField} annotation.
-     *
-     * @param sqlField {@link QuerySqlField} annotation.
-     */
-    protected void init(QuerySqlField sqlField) {
+    /** {@inheritDoc} */
+    @Override protected void init(QuerySqlField sqlField) {
         if (sqlField.descending())
             sortOrder = SortOrder.DESC;
     }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java
similarity index 86%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java
index c29f1db..fcdd408 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java
@@ -87,16 +87,12 @@
         super(desc);
     }
 
-    /**
-     * Returns DDL for Cassandra columns corresponding to POJO field.
-     *
-     * @return columns DDL.
-     */
-    public String getColumnDDL() {
+    /** {@inheritDoc} */
+    @Override public String getColumnDDL() {
         String colDDL = super.getColumnDDL();
 
         if (isStatic != null && isStatic)
-            colDDL = colDDL + " static";
+            colDDL += " static";
 
         return colDDL;
     }
@@ -125,11 +121,11 @@
         StringBuilder builder = new StringBuilder();
 
         if (idxCls != null)
-            builder.append("create custom index if not exists on ").append(keyspace).append(".").append(tbl);
+            builder.append("create custom index if not exists on \"").append(keyspace).append("\".\"").append(tbl).append("\"");
         else
-            builder.append("create index if not exists on ").append(keyspace).append(".").append(tbl);
+            builder.append("create index if not exists on \"").append(keyspace).append("\".\"").append(tbl).append("\"");
 
-        builder.append(" (").append(getColumn()).append(")");
+        builder.append(" (\"").append(getColumn()).append("\")");
 
         if (idxCls != null)
             builder.append(" using '").append(idxCls).append("'");
@@ -140,13 +136,8 @@
         return builder.append(";").toString();
     }
 
-    /**
-     * Initializes descriptor from {@link QuerySqlField} annotation.
-     *
-     * @param sqlField {@link QuerySqlField} annotation.
-     */
-    protected void init(QuerySqlField sqlField) {
-        if (sqlField.index())
-            isIndexed = true;
+    /** {@inheritDoc} */
+    @Override protected void init(QuerySqlField sqlField) {
+        // No-op.
     }
 }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java
similarity index 84%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java
index 877167d..f117fb6 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java
@@ -21,6 +21,8 @@
 import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
+
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
 import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
@@ -43,8 +45,11 @@
     public ValuePersistenceSettings(Element el) {
         super(el);
 
-        if (!PersistenceStrategy.POJO.equals(getStrategy()))
+        if (PersistenceStrategy.POJO != getStrategy()) {
+            init();
+
             return;
+        }
 
         NodeList nodes = el.getElementsByTagName(FIELD_ELEMENT);
 
@@ -54,12 +59,14 @@
             throw new IllegalStateException("Failed to initialize value fields for class '" + getJavaClass().getName() + "'");
 
         checkDuplicates(fields);
+
+        init();
     }
 
     /**
      * @return List of value fields.
      */
-    public List<PojoField> getFields() {
+    @Override public List<PojoField> getFields() {
         return fields == null ? null : Collections.unmodifiableList(fields);
     }
 
@@ -79,8 +86,14 @@
 
         if (fieldNodes == null || fieldNodes.getLength() == 0) {
             List<PropertyDescriptor> primitivePropDescriptors = PropertyMappingHelper.getPojoPropertyDescriptors(getJavaClass(), true);
-            for (PropertyDescriptor descriptor : primitivePropDescriptors)
-                list.add(new PojoValueField(descriptor));
+            for (PropertyDescriptor desc : primitivePropDescriptors) {
+                boolean valid = desc.getWriteMethod() != null ||
+                        desc.getReadMethod().getAnnotation(QuerySqlField.class) != null;
+
+                // Skip POJO field if it's read-only and is not annotated with @QuerySqlField.
+                if (valid)
+                    list.add(new PojoValueField(desc));
+            }
 
             return list;
         }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/package-info.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/package-info.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/package-info.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java
similarity index 90%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java
index e9f93a0..44d2d47 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java
@@ -23,7 +23,6 @@
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.nio.ByteBuffer;
-import org.apache.ignite.IgniteException;
 import org.apache.ignite.internal.util.typedef.internal.U;
 
 /**
@@ -51,7 +50,7 @@
             return ByteBuffer.wrap(stream.toByteArray());
         }
         catch (IOException e) {
-            throw new IgniteException("Failed to serialize object of the class '" + obj.getClass().getName() + "'", e);
+            throw new IllegalStateException("Failed to serialize object of the class '" + obj.getClass().getName() + "'", e);
         }
         finally {
             U.closeQuiet(out);
@@ -71,7 +70,7 @@
             return in.readObject();
         }
         catch (Throwable e) {
-            throw new IgniteException("Failed to deserialize object from byte stream", e);
+            throw new IllegalStateException("Failed to deserialize object from byte stream", e);
         }
         finally {
             U.closeQuiet(in);
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/Serializer.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/Serializer.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/Serializer.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/Serializer.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java
similarity index 95%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java
index e43db1d..5d971e8 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java
@@ -38,6 +38,13 @@
     public boolean tableExistenceRequired();
 
     /**
+     * Cassandra table to use for an operation.
+     *
+     * @return Table name.
+     */
+    public String getTable();
+
+    /**
      * Returns unbind CLQ statement for to be executed inside batch operation.
      *
      * @return Unbind CQL statement.
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchLoaderAssistant.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchLoaderAssistant.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchLoaderAssistant.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchLoaderAssistant.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java
similarity index 88%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java
index 506982f..b0e50ec 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java
@@ -17,7 +17,10 @@
 
 package org.apache.ignite.cache.store.cassandra.session;
 
+import org.apache.ignite.cache.store.cassandra.session.transaction.Mutation;
+
 import java.io.Closeable;
+import java.util.List;
 
 /**
  * Wrapper around Cassandra driver session, to automatically handle:
@@ -57,4 +60,11 @@
      * @param assistant execution assistance to perform the main operation logic.
      */
     public void execute(BatchLoaderAssistant assistant);
+
+    /**
+     * Executes all the mutations performed within an Ignite transaction against the Cassandra database.
+     *
+     * @param mutations Mutations.
+     */
+    public void execute(List<Mutation> mutations);
 }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java
similarity index 80%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java
index 95b8581..ac11686 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java
@@ -43,6 +43,7 @@
 import org.apache.ignite.cache.store.cassandra.common.RandomSleeper;
 import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
 import org.apache.ignite.cache.store.cassandra.session.pool.SessionPool;
+import org.apache.ignite.cache.store.cassandra.session.transaction.Mutation;
 import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
 
 /**
@@ -68,7 +69,7 @@
     private volatile Session ses;
 
     /** Number of references to Cassandra driver session (for multithreaded environment). */
-    private volatile int refCnt = 0;
+    private volatile int refCnt;
 
     /** Storage for the session prepared statements */
     private static final Map<String, PreparedStatement> sesStatements = new HashMap<>();
@@ -129,7 +130,7 @@
                 }
 
                 try {
-                    PreparedStatement preparedSt = prepareStatement(assistant.getStatement(),
+                    PreparedStatement preparedSt = prepareStatement(assistant.getTable(), assistant.getStatement(),
                         assistant.getPersistenceSettings(), assistant.tableExistenceRequired());
 
                     if (preparedSt == null)
@@ -151,7 +152,7 @@
                             return null;
                         }
 
-                        handleTableAbsenceError(assistant.getPersistenceSettings());
+                        handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
                     }
                     else if (CassandraHelper.isHostsAvailabilityError(e))
                         handleHostsAvailabilityError(e, attempt, errorMsg);
@@ -162,7 +163,8 @@
                         throw new IgniteException(errorMsg, e);
                 }
 
-                sleeper.sleep();
+                if (!CassandraHelper.isTableAbsenceError(error))
+                    sleeper.sleep();
 
                 attempt++;
             }
@@ -210,7 +212,7 @@
 
                 List<Cache.Entry<Integer, ResultSetFuture>> futResults = new LinkedList<>();
 
-                PreparedStatement preparedSt = prepareStatement(assistant.getStatement(),
+                PreparedStatement preparedSt = prepareStatement(assistant.getTable(), assistant.getStatement(),
                     assistant.getPersistenceSettings(), assistant.tableExistenceRequired());
 
                 if (preparedSt == null)
@@ -232,7 +234,7 @@
                                     return assistant.processedData();
 
                                 tblAbsenceEx = e;
-                                handleTableAbsenceError(assistant.getPersistenceSettings());
+                                handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
                             }
                             else if (CassandraHelper.isHostsAvailabilityError(e)) {
                                 hostsAvailEx = e;
@@ -307,7 +309,7 @@
                         return assistant.processedData();
 
                     error = tblAbsenceEx;
-                    handleTableAbsenceError(assistant.getPersistenceSettings());
+                    handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
                 }
 
                 if (hostsAvailEx != null) {
@@ -320,7 +322,8 @@
                     handlePreparedStatementClusterError(prepStatEx);
                 }
 
-                sleeper.sleep();
+                if (!CassandraHelper.isTableAbsenceError(error))
+                    sleeper.sleep();
 
                 attempt++;
             }
@@ -402,6 +405,103 @@
     }
 
     /** {@inheritDoc} */
+    @Override public void execute(List<Mutation> mutations) {
+        if (mutations == null || mutations.isEmpty())
+            return;
+
+        Throwable error = null;
+        String errorMsg = "Failed to apply " + mutations.size() + " mutations performed within Ignite " +
+                "transaction into Cassandra";
+
+        int attempt = 0;
+        boolean tableExistenceRequired = false;
+        Map<String, PreparedStatement> statements = new HashMap<>();
+        Map<String, KeyValuePersistenceSettings> tableSettings = new HashMap<>();
+        RandomSleeper sleeper = newSleeper();
+
+        incrementSessionRefs();
+
+        try {
+            while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
+                error = null;
+
+                if (attempt != 0) {
+                    log.warning("Trying " + (attempt + 1) + " attempt to apply " + mutations.size() + " mutations " +
+                            "performed within Ignite transaction into Cassandra");
+                }
+
+                try {
+                    BatchStatement batch = new BatchStatement();
+
+                    // accumulating all the mutations into one Cassandra logged batch
+                    for (Mutation mutation : mutations) {
+                        String key = mutation.getTable() + mutation.getClass().getName();
+                        PreparedStatement st = statements.get(key);
+
+                        if (st == null) {
+                            st = prepareStatement(mutation.getTable(), mutation.getStatement(),
+                                    mutation.getPersistenceSettings(), mutation.tableExistenceRequired());
+
+                            if (st != null)
+                                statements.put(key, st);
+                        }
+
+                        if (st != null)
+                            batch.add(mutation.bindStatement(st));
+
+                        if (attempt == 0) {
+                            if (mutation.tableExistenceRequired()) {
+                                tableExistenceRequired = true;
+
+                                if (!tableSettings.containsKey(mutation.getTable()))
+                                    tableSettings.put(mutation.getTable(), mutation.getPersistenceSettings());
+                            }
+                        }
+                    }
+
+                    // committing logged batch into Cassandra
+                    if (batch.size() > 0)
+                        session().execute(tuneStatementExecutionOptions(batch));
+
+                    return;
+                } catch (Throwable e) {
+                    error = e;
+
+                    if (CassandraHelper.isTableAbsenceError(e)) {
+                        if (tableExistenceRequired) {
+                            for (Map.Entry<String, KeyValuePersistenceSettings> entry : tableSettings.entrySet())
+                                handleTableAbsenceError(entry.getKey(), entry.getValue());
+                        }
+                        else
+                            return;
+                    } else if (CassandraHelper.isHostsAvailabilityError(e)) {
+                        if (handleHostsAvailabilityError(e, attempt, errorMsg))
+                            statements.clear();
+                    } else if (CassandraHelper.isPreparedStatementClusterError(e)) {
+                        handlePreparedStatementClusterError(e);
+                        statements.clear();
+                    } else {
+                        // For an error which we don't know how to handle, we will not try next attempts and terminate.
+                        throw new IgniteException(errorMsg, e);
+                    }
+                }
+
+                if (!CassandraHelper.isTableAbsenceError(error))
+                    sleeper.sleep();
+
+                attempt++;
+            }
+        } catch (Throwable e) {
+            error = e;
+        } finally {
+            decrementSessionRefs();
+        }
+
+        log.error(errorMsg, error);
+        throw new IgniteException(errorMsg, error);
+    }
+
+    /** {@inheritDoc} */
     @Override public synchronized void close() throws IOException {
         if (decrementSessionRefs() == 0 && ses != null) {
             SessionPool.put(this, ses);
@@ -475,7 +575,7 @@
      * @param tblExistenceRequired Flag indicating if table existence is required for the statement.
      * @return Prepared statement.
      */
-    private PreparedStatement prepareStatement(String statement, KeyValuePersistenceSettings settings,
+    private PreparedStatement prepareStatement(String table, String statement, KeyValuePersistenceSettings settings,
         boolean tblExistenceRequired) {
 
         int attempt = 0;
@@ -507,7 +607,7 @@
                         if (!tblExistenceRequired)
                             return null;
 
-                        handleTableAbsenceError(settings);
+                        handleTableAbsenceError(table, settings);
                     }
                     else if (CassandraHelper.isHostsAvailabilityError(e))
                         handleHostsAvailabilityError(e, attempt, errorMsg);
@@ -517,7 +617,8 @@
                     error = e;
                 }
 
-                sleeper.sleep();
+                if (!CassandraHelper.isTableAbsenceError(error))
+                    sleeper.sleep();
 
                 attempt++;
             }
@@ -574,24 +675,25 @@
      *
      * @param settings Persistence settings.
      */
-    private void createTable(KeyValuePersistenceSettings settings) {
+    private void createTable(String table, KeyValuePersistenceSettings settings) {
         int attempt = 0;
         Throwable error = null;
-        String errorMsg = "Failed to create Cassandra table '" + settings.getTableFullName() + "'";
+        String tableFullName = settings.getKeyspace() + "." + table;
+        String errorMsg = "Failed to create Cassandra table '" + tableFullName + "'";
 
         while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
             try {
                 log.info("-----------------------------------------------------------------------");
-                log.info("Creating Cassandra table '" + settings.getTableFullName() + "'");
+                log.info("Creating Cassandra table '" + tableFullName + "'");
                 log.info("-----------------------------------------------------------------------\n\n" +
-                    settings.getTableDDLStatement() + "\n");
+                        settings.getTableDDLStatement(table) + "\n");
                 log.info("-----------------------------------------------------------------------");
-                session().execute(settings.getTableDDLStatement());
-                log.info("Cassandra table '" + settings.getTableFullName() + "' was successfully created");
+                session().execute(settings.getTableDDLStatement(table));
+                log.info("Cassandra table '" + tableFullName + "' was successfully created");
                 return;
             }
             catch (AlreadyExistsException ignored) {
-                log.info("Cassandra table '" + settings.getTableFullName() + "' already exist");
+                log.info("Cassandra table '" + tableFullName + "' already exists");
                 return;
             }
             catch (Throwable e) {
@@ -599,7 +701,7 @@
                     throw new IgniteException(errorMsg, e);
 
                 if (CassandraHelper.isKeyspaceAbsenceError(e)) {
-                    log.warning("Failed to create Cassandra table '" + settings.getTableFullName() +
+                    log.warning("Failed to create Cassandra table '" + tableFullName +
                         "' cause appropriate keyspace doesn't exist", e);
                     createKeyspace(settings);
                 }
@@ -620,31 +722,38 @@
      *
      * @param settings Persistence settings.
      */
-    private void createTableIndexes(KeyValuePersistenceSettings settings) {
-        if (settings.getIndexDDLStatements() == null || settings.getIndexDDLStatements().isEmpty())
+    private void createTableIndexes(String table, KeyValuePersistenceSettings settings) {
+        List<String> indexDDLStatements = settings.getIndexDDLStatements(table);
+
+        if (indexDDLStatements == null || indexDDLStatements.isEmpty())
             return;
 
         int attempt = 0;
         Throwable error = null;
-        String errorMsg = "Failed to create indexes for Cassandra table " + settings.getTableFullName();
+        String tableFullName = settings.getKeyspace() + "." + table;
+        String errorMsg = "Failed to create indexes for Cassandra table " + tableFullName;
 
         while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
             try {
-                log.info("Creating indexes for Cassandra table '" + settings.getTableFullName() + "'");
+                log.info("-----------------------------------------------------------------------");
+                log.info("Creating indexes for Cassandra table '" + tableFullName + "'");
+                log.info("-----------------------------------------------------------------------");
 
-                for (String statement : settings.getIndexDDLStatements()) {
+                for (String statement : indexDDLStatements) {
                     try {
+                        log.info(statement);
+                        log.info("-----------------------------------------------------------------------");
                         session().execute(statement);
                     }
                     catch (AlreadyExistsException ignored) {
                     }
                     catch (Throwable e) {
-                        if (!(e instanceof InvalidQueryException) || !e.getMessage().equals("Index already exists"))
+                        if (!(e instanceof InvalidQueryException) || !"Index already exists".equals(e.getMessage()))
                             throw new IgniteException(errorMsg, e);
                     }
                 }
 
-                log.info("Indexes for Cassandra table '" + settings.getTableFullName() + "' were successfully created");
+                log.info("Indexes for Cassandra table '" + tableFullName + "' were successfully created");
 
                 return;
             }
@@ -652,7 +761,7 @@
                 if (CassandraHelper.isHostsAvailabilityError(e))
                     handleHostsAvailabilityError(e, attempt, errorMsg);
                 else if (CassandraHelper.isTableAbsenceError(e))
-                    createTable(settings);
+                    createTable(table, settings);
                 else
                     throw new IgniteException(errorMsg, e);
 
@@ -700,22 +809,24 @@
      *
      * @param settings Persistence settings.
      */
-    private void handleTableAbsenceError(KeyValuePersistenceSettings settings) {
+    private void handleTableAbsenceError(String table, KeyValuePersistenceSettings settings) {
         int hndNum = tblAbsenceHandlersCnt.incrementAndGet();
 
+        String tableFullName = settings.getKeyspace() + "." + table;
+
         try {
             synchronized (tblAbsenceHandlersCnt) {
                 // Oooops... I am not the first thread who tried to handle table absence problem.
                 if (hndNum != 0) {
-                    log.warning("Table " + settings.getTableFullName() + " absence problem detected. " +
+                    log.warning("Table " + tableFullName + " absence problem detected. " +
                             "Another thread already fixed it.");
                     return;
                 }
 
-                log.warning("Table " + settings.getTableFullName() + " absence problem detected. " +
+                log.warning("Table " + tableFullName + " absence problem detected. " +
                         "Trying to create table.");
 
-                IgniteException error = new IgniteException("Failed to create Cassandra table " + settings.getTableFullName());
+                IgniteException error = new IgniteException("Failed to create Cassandra table " + tableFullName);
 
                 int attempt = 0;
 
@@ -724,14 +835,14 @@
 
                     try {
                         createKeyspace(settings);
-                        createTable(settings);
-                        createTableIndexes(settings);
+                        createTable(table, settings);
+                        createTableIndexes(table, settings);
                     }
                     catch (Throwable e) {
                         if (CassandraHelper.isHostsAvailabilityError(e))
                             handleHostsAvailabilityError(e, attempt, null);
                         else
-                            throw new IgniteException("Failed to create Cassandra table " + settings.getTableFullName(), e);
+                            throw new IgniteException("Failed to create Cassandra table " + tableFullName, e);
 
                         error = (e instanceof IgniteException) ? (IgniteException)e : new IgniteException(e);
                     }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java
similarity index 87%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java
index 867f58d..b0dba8b 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java
@@ -30,14 +30,21 @@
  */
 public interface ExecutionAssistant<R> {
     /**
-     * Indicates if Cassandra table existence is required for operation.
+     * Indicates if Cassandra table existence is required for an operation.
      *
      * @return true if table existence required.
      */
     public boolean tableExistenceRequired();
 
     /**
-     * Returns CQL statement to be used for operation.
+     * Cassandra table to use for an operation.
+     *
+     * @return Table name.
+     */
+    public String getTable();
+
+    /**
+     * Returns CQL statement to be used for an operation.
      *
      * @return CQL statement.
      */
@@ -53,7 +60,7 @@
     public BoundStatement bindStatement(PreparedStatement statement);
 
     /**
-     * Persistence settings to use for operation.
+     * Persistence settings to use for an operation.
      *
      * @return persistence settings.
      */
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/GenericBatchExecutionAssistant.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/GenericBatchExecutionAssistant.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/GenericBatchExecutionAssistant.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/GenericBatchExecutionAssistant.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/LoadCacheCustomQueryWorker.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/LoadCacheCustomQueryWorker.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/LoadCacheCustomQueryWorker.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/LoadCacheCustomQueryWorker.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/package-info.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/package-info.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/package-info.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java
similarity index 98%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java
index fc4a907..95938bd 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java
@@ -146,7 +146,7 @@
 
         synchronized (sessions) {
             try {
-                if (sessions.size() == 0)
+                if (sessions.isEmpty())
                     return;
 
                 wrappers = new LinkedList<>();
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionWrapper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionWrapper.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionWrapper.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionWrapper.java
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/package-info.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/package-info.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/package-info.java
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/BaseMutation.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/BaseMutation.java
new file mode 100644
index 0000000..2625e87
--- /dev/null
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/BaseMutation.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.cache.store.cassandra.session.transaction;
+
+import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
+import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
+
+/**
+ * Base class to inherit from to implement specific mutation operations.
+ */
+public abstract class BaseMutation implements Mutation {
+    /** Cassandra table to use. */
+    private final String table;
+
+    /** Persistence controller to be utilized for mutation. */
+    private final PersistenceController ctrl;
+
+    /**
+     * Creates instance of mutation operation.
+     *
+     * @param table Cassandra table which should be used for the mutation.
+     * @param ctrl Persistence controller to use.
+     */
+    public BaseMutation(String table, PersistenceController ctrl) {
+        if (table == null || table.trim().isEmpty())
+            throw new IllegalArgumentException("Table name should be specified");
+
+        if (ctrl == null)
+            throw new IllegalArgumentException("Persistence controller should be specified");
+
+        this.table = table;
+        this.ctrl = ctrl;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getTable() {
+        return table;
+    }
+
+    /** {@inheritDoc} */
+    @Override public KeyValuePersistenceSettings getPersistenceSettings() {
+        return ctrl.getPersistenceSettings();
+    }
+
+    /**
+     * Service method to get persistence controller instance.
+     *
+     * @return Persistence controller to use for the mutation.
+     */
+    protected PersistenceController controller() {
+        return ctrl;
+    }
+}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/DeleteMutation.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/DeleteMutation.java
new file mode 100644
index 0000000..79c0bfe
--- /dev/null
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/DeleteMutation.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.cache.store.cassandra.session.transaction;
+
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.PreparedStatement;
+import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
+
+/**
+ * Mutation which deletes an object from Cassandra.
+ */
+public class DeleteMutation extends BaseMutation {
+    /** Ignite cache key of the object which should be deleted. */
+    private final Object key;
+
+    /**
+     * Creates instance of delete mutation operation.
+     *
+     * @param key Ignite cache key of the object which should be deleted.
+     * @param table Cassandra table which should be used for the mutation.
+     * @param ctrl Persistence controller to use.
+     */
+    public DeleteMutation(Object key, String table, PersistenceController ctrl) {
+        super(table, ctrl);
+        this.key = key;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean tableExistenceRequired() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getStatement() {
+        return controller().getDeleteStatement(getTable());
+    }
+
+    /** {@inheritDoc} */
+    @Override public BoundStatement bindStatement(PreparedStatement statement) {
+        return controller().bindKey(statement, key);
+    }
+}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/Mutation.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/Mutation.java
new file mode 100644
index 0000000..f3fb354
--- /dev/null
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/Mutation.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.cache.store.cassandra.session.transaction;
+
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.PreparedStatement;
+import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
+
+/**
+ * Provides information about a particular mutation operation performed within a transaction.
+ */
+public interface Mutation {
+    /**
+     * Cassandra table to use for an operation.
+     *
+     * @return Table name.
+     */
+    public String getTable();
+
+    /**
+     * Indicates if Cassandra table existence is required for this operation.
+     *
+     * @return {@code true} if table existence is required.
+     */
+    public boolean tableExistenceRequired();
+
+    /**
+     * Returns Ignite cache key/value persistence settings.
+     *
+     * @return persistence settings.
+     */
+    public KeyValuePersistenceSettings getPersistenceSettings();
+
+    /**
+     * Returns the unbound CQL statement to be executed.
+     *
+     * @return Unbound CQL statement.
+     */
+    public String getStatement();
+
+    /**
+     * Binds data of this mutation to the given prepared statement.
+     *
+     * @param statement Statement.
+     * @return Bound statement.
+     */
+    public BoundStatement bindStatement(PreparedStatement statement);
+}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/WriteMutation.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/WriteMutation.java
new file mode 100644
index 0000000..3c74378
--- /dev/null
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/WriteMutation.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.cache.store.cassandra.session.transaction;
+
+import javax.cache.Cache;
+
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.PreparedStatement;
+
+import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
+
+/**
+ * Mutation which writes (inserts) an object into Cassandra.
+ */
+public class WriteMutation extends BaseMutation {
+    /** Ignite cache entry to be inserted into Cassandra. */
+    private final Cache.Entry entry;
+
+    /**
+     * Creates instance of write mutation operation.
+     *
+     * @param entry Ignite cache entry to be inserted into Cassandra.
+     * @param table Cassandra table which should be used for the mutation.
+     * @param ctrl Persistence controller to use.
+     */
+    public WriteMutation(Cache.Entry entry, String table, PersistenceController ctrl) {
+        super(table, ctrl);
+        this.entry = entry;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean tableExistenceRequired() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getStatement() {
+        return controller().getWriteStatement(getTable());
+    }
+
+    /** {@inheritDoc} */
+    @Override public BoundStatement bindStatement(PreparedStatement statement) {
+        return controller().bindKeyValue(statement, entry.getKey(), entry.getValue());
+    }
+}
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/package-info.java
similarity index 66%
copy from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
copy to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/package-info.java
index e1fd60c..7141845 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/package-info.java
@@ -15,23 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.ignite.cache.store.cassandra.datasource;
-
 /**
- * Provides credentials for Cassandra (instead of specifying user/password directly in Spring context XML).
+ * Contains mutation implementations used to store changes made inside an Ignite transaction
  */
-public interface Credentials {
-    /**
-     * Returns user name
-     *
-     * @return user name
-     */
-    public String getUser();
-
-    /**
-     * Returns password
-     *
-     * @return password
-     */
-    public String getPassword();
-}
+package org.apache.ignite.cache.store.cassandra.session.transaction;
\ No newline at end of file
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java
similarity index 85%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java
index 4f40478..e3ec391 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java
+++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java
@@ -18,6 +18,8 @@
 package org.apache.ignite.cache.store.cassandra.utils;
 
 import java.io.File;
+import java.util.List;
+
 import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
 
 /**
@@ -44,14 +46,24 @@
 
             try {
                 KeyValuePersistenceSettings settings = new KeyValuePersistenceSettings(file);
+                String table = settings.getTable() != null ? settings.getTable() : "my_table";
+
                 System.out.println("-------------------------------------------------------------");
                 System.out.println("DDL for keyspace/table from file: " + arg);
                 System.out.println("-------------------------------------------------------------");
                 System.out.println();
                 System.out.println(settings.getKeyspaceDDLStatement());
                 System.out.println();
-                System.out.println(settings.getTableDDLStatement());
+                System.out.println(settings.getTableDDLStatement(table));
                 System.out.println();
+
+                List<String> statements = settings.getIndexDDLStatements(table);
+                if (statements != null && !statements.isEmpty()) {
+                    for (String st : statements) {
+                        System.out.println(st);
+                        System.out.println();
+                    }
+                }
             }
             catch (Throwable e) {
                 System.out.println("-------------------------------------------------------------");
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/utils/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/package-info.java
similarity index 100%
rename from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/utils/package-info.java
rename to modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/package-info.java
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/README.txt b/modules/cassandra/store/src/test/bootstrap/aws/README.txt
new file mode 100644
index 0000000..a61b235
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/README.txt
@@ -0,0 +1,13 @@
+Shell scripts to spin up Ignite, Cassandra and Load tests clusters in AWS.
+
+1) cassandra - bootstrap scripts for Cassandra cluster nodes
+2) ganglia - bootstrap scripts for Ganglia master and agents
+3) ignite - bootstrap scripts for Ignite cluster nodes
+4) tests - bootstrap scripts for Load Tests cluster nodes
+5) common.sh - definitions for common functions
+6) env.sh - definitions for common variables
+7) log-collector.sh - log collector daemon script, to collect logs and upload them to S3
+
+For more details please look at the documentation:
+
+    https://apacheignite.readme.io/docs/aws-infrastructure-deployment
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
new file mode 100644
index 0000000..017b1b1
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
@@ -0,0 +1,336 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Bootstrap script to spin up Cassandra cluster
+# -----------------------------------------------------------------------------------------------
+
+# URL to download AWS CLI tools
+AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
+
+# URL to download JDK
+JDK_DOWNLOAD_URL=http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz
+
+# URL to download Ignite-Cassandra tests package - you should package and upload it to this location beforehand
+TESTS_PACKAGE_DONLOAD_URL=s3://<bucket>/<folder>/ignite-cassandra-tests-<version>.zip
+
+# Terminates script execution and uploads logs to S3
+terminate()
+{
+    SUCCESS_URL=$S3_CASSANDRA_BOOTSTRAP_SUCCESS
+    FAILURE_URL=$S3_CASSANDRA_BOOTSTRAP_FAILURE
+
+    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
+        SUCCESS_URL=${SUCCESS_URL}/
+    fi
+
+    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
+        FAILURE_URL=${FAILURE_URL}/
+    fi
+
+    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
+    msg=$host_name
+
+    if [ -n "$1" ]; then
+        echo "[ERROR] $1"
+        echo "[ERROR]-----------------------------------------------------"
+        echo "[ERROR] Cassandra node bootstrap failed"
+        echo "[ERROR]-----------------------------------------------------"
+        msg=$1
+
+        if [ -z "$FAILURE_URL" ]; then
+            exit 1
+        fi
+
+        reportFolder=${FAILURE_URL}${host_name}
+        reportFile=$reportFolder/__error__
+    else
+        echo "[INFO]-----------------------------------------------------"
+        echo "[INFO] Cassandra node bootstrap successfully completed"
+        echo "[INFO]-----------------------------------------------------"
+
+        if [ -z "$SUCCESS_URL" ]; then
+            exit 0
+        fi
+
+        reportFolder=${SUCCESS_URL}${host_name}
+        reportFile=$reportFolder/__success__
+    fi
+
+    echo $msg > /opt/bootstrap-result
+
+    aws s3 rm --recursive $reportFolder
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to drop report folder: $reportFolder"
+    fi
+
+    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
+    fi
+
+    rm -f /opt/bootstrap-result
+
+    if [ -n "$1" ]; then
+        exit 1
+    fi
+
+    exit 0
+}
+
+# Downloads specified package
+downloadPackage()
+{
+    echo "[INFO] Downloading $3 package from $1 into $2"
+
+    for i in 0 9;
+    do
+        if [[ "$1" == s3* ]]; then
+            aws s3 cp $1 $2
+            code=$?
+        else
+            curl "$1" -o "$2"
+            code=$?
+        fi
+
+        if [ $code -eq 0 ]; then
+            echo "[INFO] $3 package successfully downloaded from $1 into $2"
+            return 0
+        fi
+
+        echo "[WARN] Failed to download $3 package from $i attempt, sleeping extra 5sec"
+        sleep 5s
+    done
+
+    terminate "All 10 attempts to download $3 package from $1 are failed"
+}
+
+# Downloads and sets up JDK
+setupJava()
+{
+    rm -Rf /opt/java /opt/jdk.tar.gz
+
+    echo "[INFO] Downloading 'jdk'"
+    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$JDK_DOWNLOAD_URL" -O /opt/jdk.tar.gz
+    if [ $? -ne 0 ]; then
+        terminate "Failed to download 'jdk'"
+    fi
+
+    echo "[INFO] Untaring 'jdk'"
+    tar -xvzf /opt/jdk.tar.gz -C /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to untar 'jdk'"
+    fi
+
+    rm -Rf /opt/jdk.tar.gz
+
+    unzipDir=$(ls /opt | grep "jdk")
+    if [ "$unzipDir" != "java" ]; then
+        mv /opt/$unzipDir /opt/java
+    fi
+}
+
+# Downloads and sets up AWS CLI
+setupAWSCLI()
+{
+    echo "[INFO] Installing 'awscli'"
+    pip install --upgrade awscli
+    if [ $? -eq 0 ]; then
+        return 0
+    fi
+
+    echo "[ERROR] Failed to install 'awscli' using pip"
+    echo "[INFO] Trying to install awscli using zip archive"
+    echo "[INFO] Downloading awscli zip"
+
+    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
+
+    echo "[INFO] Unzipping awscli zip"
+    unzip /opt/awscli-bundle.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip awscli zip"
+    fi
+
+    rm -Rf /opt/awscli-bundle.zip
+
+    echo "[INFO] Installing awscli"
+    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install awscli"
+    fi
+
+    echo "[INFO] Successfully installed awscli from zip archive"
+}
+
+# Sets up all the prerequisites (packages, settings, etc.)
+setupPreRequisites()
+{
+    echo "[INFO] Installing 'wget' package"
+    yum -y install wget
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'wget' package"
+    fi
+
+    echo "[INFO] Installing 'net-tools' package"
+    yum -y install net-tools
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'net-tools' package"
+    fi
+
+    echo "[INFO] Installing 'python' package"
+    yum -y install python
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'python' package"
+    fi
+
+    echo "[INFO] Installing 'unzip' package"
+    yum -y install unzip
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'unzip' package"
+    fi
+
+    downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
+
+    echo "[INFO] Installing 'pip'"
+    python /opt/get-pip.py
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'pip'"
+    fi
+}
+
+# Downloads and sets up the tests package
+setupTestsPackage()
+{
+    downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/ignite-cassandra-tests.zip" "Tests"
+
+    rm -Rf /opt/ignite-cassandra-tests
+
+    unzip /opt/ignite-cassandra-tests.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip tests package"
+    fi
+
+    rm -f /opt/ignite-cassandra-tests.zip
+
+    unzipDir=$(ls /opt | grep "ignite-cassandra")
+    if [ "$unzipDir" != "ignite-cassandra-tests" ]; then
+        mv /opt/$unzipDir /opt/ignite-cassandra-tests
+    fi
+
+    find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
+
+    . /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "cassandra"
+
+    setupNTP
+
+    echo "[INFO] Starting logs collector daemon"
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh "$S3_LOGS_TRIGGER" "$S3_CASSANDRA_LOGS/$HOST_NAME" "/opt/cassandra/logs" "/opt/cassandra/cassandra-start.log" > /opt/logs-collector.log &
+
+    echo "[INFO] Logs collector daemon started: $!"
+
+    echo "----------------------------------------------------------------------------------------"
+    printInstanceInfo
+    echo "----------------------------------------------------------------------------------------"
+    tagInstance
+    bootstrapGangliaAgent "cassandra" 8641
+}
+
+# Downloads Cassandra package
+downloadCassandra()
+{
+    downloadPackage "$CASSANDRA_DOWNLOAD_URL" "/opt/apache-cassandra.tar.gz" "Cassandra"
+
+    rm -Rf /opt/cassandra
+
+    echo "[INFO] Untaring Cassandra package"
+    tar -xvzf /opt/apache-cassandra.tar.gz -C /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to untar Cassandra package"
+    fi
+
+    rm -f /opt/apache-cassandra.tar.gz
+
+    unzipDir=$(ls /opt | grep "cassandra" | grep "apache")
+    if [ "$unzipDir" != "cassandra" ]; then
+        mv /opt/$unzipDir /opt/cassandra
+    fi
+}
+
+# Sets up Cassandra
+setupCassandra()
+{
+    echo "[INFO] Creating 'cassandra' group"
+    exists=$(cat /etc/group | grep cassandra)
+    if [ -z "$exists" ]; then
+        groupadd cassandra
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create 'cassandra' group"
+        fi
+    fi
+
+    echo "[INFO] Creating 'cassandra' user"
+    exists=$(cat /etc/passwd | grep cassandra)
+    if [ -z "$exists" ]; then
+        useradd -g cassandra cassandra
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create 'cassandra' user"
+        fi
+    fi
+
+    rm -f /opt/cassandra/conf/cassandra-env.sh /opt/cassandra/conf/cassandra-template.yaml
+
+    cp /opt/ignite-cassandra-tests/bootstrap/aws/cassandra/cassandra-env.sh /opt/cassandra/conf
+    cp /opt/ignite-cassandra-tests/bootstrap/aws/cassandra/cassandra-template.yaml /opt/cassandra/conf
+
+    chown -R cassandra:cassandra /opt/cassandra /opt/ignite-cassandra-tests
+
+    createCassandraStorageLayout
+
+    cat /opt/cassandra/conf/cassandra-template.yaml | sed -r "s/\\\$\{CASSANDRA_DATA_DIR\}/$CASSANDRA_DATA_DIR/g" > /opt/cassandra/conf/cassandra-template-1.yaml
+    cat /opt/cassandra/conf/cassandra-template-1.yaml | sed -r "s/\\\$\{CASSANDRA_COMMITLOG_DIR\}/$CASSANDRA_COMMITLOG_DIR/g" > /opt/cassandra/conf/cassandra-template-2.yaml
+    cat /opt/cassandra/conf/cassandra-template-2.yaml | sed -r "s/\\\$\{CASSANDRA_CACHES_DIR\}/$CASSANDRA_CACHES_DIR/g" > /opt/cassandra/conf/cassandra-template-3.yaml
+
+    rm -f /opt/cassandra/conf/cassandra-template.yaml /opt/cassandra/conf/cassandra-template-1.yaml /opt/cassandra/conf/cassandra-template-2.yaml
+    mv /opt/cassandra/conf/cassandra-template-3.yaml /opt/cassandra/conf/cassandra-template.yaml
+
+    echo "export JAVA_HOME=/opt/java" >> $1
+    echo "export CASSANDRA_HOME=/opt/cassandra" >> $1
+    echo "export PATH=\$JAVA_HOME/bin:\$CASSANDRA_HOME/bin:\$PATH" >> $1
+}
+
+###################################################################################################################
+
+echo "[INFO]-----------------------------------------------------------------"
+echo "[INFO] Bootstrapping Cassandra node"
+echo "[INFO]-----------------------------------------------------------------"
+
+setupPreRequisites
+setupJava
+setupAWSCLI
+setupTestsPackage
+downloadCassandra
+setupCassandra "/root/.bash_profile"
+
+cmd="/opt/ignite-cassandra-tests/bootstrap/aws/cassandra/cassandra-start.sh"
+
+#sudo -u cassandra -g cassandra sh -c "$cmd | tee /opt/cassandra/cassandra-start.log"
+
+$cmd | tee /opt/cassandra/cassandra-start.log
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-env.sh b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-env.sh
similarity index 97%
rename from modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-env.sh
rename to modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-env.sh
index 11dfc50..ba76401 100644
--- a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-env.sh
+++ b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-env.sh
@@ -15,6 +15,10 @@
 # limitations under the License.
 #
 
+# -----------------------------------------------------------------------------------------------
+# Environment setup script from Cassandra distribution
+# -----------------------------------------------------------------------------------------------
+
 calculate_heap_sizes()
 {
     case "`uname`" in
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-start.sh b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-start.sh
new file mode 100644
index 0000000..4a6daef
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-start.sh
@@ -0,0 +1,217 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Script to start Cassandra daemon (used by cassandra-bootstrap.sh)
+# -----------------------------------------------------------------------------------------------
+
+#profile=/home/cassandra/.bash_profile
+profile=/root/.bash_profile
+
+. $profile
+. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "cassandra"
+
+# Sets up Cassandra seeds for this EC2 node. Looks for information in S3 about
+# Cassandra cluster nodes that are already up and running
+setupCassandraSeeds()
+{
+    if [ "$FIRST_NODE_LOCK" == "true" ]; then
+        echo "[INFO] Setting up Cassandra seeds"
+
+        CLUSTER_SEEDS=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+        echo "[INFO] Using host address as a seed for the first Cassandra node: $CLUSTER_SEEDS"
+
+        aws s3 rm --recursive ${S3_CASSANDRA_NODES_DISCOVERY::-1}
+        if [ $? -ne 0 ]; then
+            terminate "Failed to clean Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY"
+        fi
+    else
+        setupClusterSeeds "cassandra" "true"
+        CLUSTER_SEEDS=$(echo $CLUSTER_SEEDS | sed -r "s/ /,/g")
+    fi
+
+    cat /opt/cassandra/conf/cassandra-template.yaml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CLUSTER_SEEDS/g" > /opt/cassandra/conf/cassandra.yaml
+}
+
+# Gracefully starts Cassandra daemon and waits until it joins Cassandra cluster
+startCassandra()
+{
+    echo "[INFO]-------------------------------------------------------------"
+    echo "[INFO] Trying attempt $START_ATTEMPT to start Cassandra daemon"
+    echo "[INFO]-------------------------------------------------------------"
+    echo ""
+
+    setupCassandraSeeds
+
+    waitToJoinCluster
+
+    if [ "$FIRST_NODE_LOCK" == "true" ]; then
+        aws s3 rm --recursive ${S3_CASSANDRA_NODES_DISCOVERY::-1}
+        if [ $? -ne 0 ]; then
+            terminate "Failed to clean Cassandra node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
+        fi
+    fi
+
+    proc=$(ps -ef | grep java | grep "org.apache.cassandra.service.CassandraDaemon")
+    proc=($proc)
+
+    if [ -n "${proc[1]}" ]; then
+        echo "[INFO] Terminating existing Cassandra process ${proc[1]}"
+        kill -9 ${proc[1]}
+    fi
+
+    echo "[INFO] Starting Cassandra"
+    rm -Rf /opt/cassandra/logs/* /storage/cassandra/*
+    /opt/cassandra/bin/cassandra -R &
+
+    echo "[INFO] Cassandra job id: $!"
+
+    sleep 1m
+
+    START_ATTEMPT=$(( $START_ATTEMPT+1 ))
+}
+
+#######################################################################################################
+
+START_ATTEMPT=0
+
+# Cleans all the previous metadata about this EC2 node
+unregisterNode
+
+# Tries to get first-node lock
+tryToGetFirstNodeLock
+
+echo "[INFO]-----------------------------------------------------------------"
+
+if [ "$FIRST_NODE_LOCK" == "true" ]; then
+    echo "[INFO] Starting first Cassandra node"
+else
+    echo "[INFO] Starting Cassandra node"
+fi
+
+echo "[INFO]-----------------------------------------------------------------"
+printInstanceInfo
+echo "[INFO]-----------------------------------------------------------------"
+
+if [ "$FIRST_NODE_LOCK" != "true" ]; then
+    waitFirstClusterNodeRegistered "true"
+else
+    cleanupMetadata
+fi
+
+# Start Cassandra daemon
+startCassandra
+
+startTime=$(date +%s)
+
+# Trying multiple attempts to start Cassandra daemon
+while true; do
+    proc=$(ps -ef | grep java | grep "org.apache.cassandra.service.CassandraDaemon")
+
+    /opt/cassandra/bin/nodetool status &> /dev/null
+
+    if [ $? -eq 0 ]; then
+        echo "[INFO]-----------------------------------------------------"
+        echo "[INFO] Cassandra daemon successfully started"
+        echo "[INFO]-----------------------------------------------------"
+        echo $proc
+        echo "[INFO]-----------------------------------------------------"
+
+        # Once node joined the cluster we need to remove cluster-join lock
+        # to allow other EC2 nodes to acquire it and join cluster sequentially
+        removeClusterJoinLock
+
+        break
+    fi
+
+    currentTime=$(date +%s)
+    duration=$(( $currentTime-$startTime ))
+    duration=$(( $duration/60 ))
+
+    if [ $duration -gt $SERVICE_STARTUP_TIME ]; then
+        if [ "$FIRST_NODE_LOCK" == "true" ]; then
+            # If the first node of Cassandra cluster failed to start Cassandra daemon in SERVICE_STARTUP_TIME min,
+            # we will not try any other attempts and just terminate with error. Terminate function itself, will
+            # take care about removing all the locks holding by this node.
+            terminate "${SERVICE_STARTUP_TIME}min timeout expired, but first Cassandra daemon is still not up and running"
+        else
+            # If node isn't the first node of Cassandra cluster and it failed to start we need to
+            # remove cluster-join lock to allow other EC2 nodes to acquire it
+            removeClusterJoinLock
+
+            # If node failed all SERVICE_START_ATTEMPTS attempts to start Cassandra daemon we will not
+            # try anymore and terminate with error
+            if [ $START_ATTEMPT -gt $SERVICE_START_ATTEMPTS ]; then
+                terminate "${SERVICE_START_ATTEMPTS} attempts exceed, but Cassandra daemon is still not up and running"
+            fi
+
+            # New attempt to start Cassandra daemon
+            startCassandra
+        fi
+
+        continue
+    fi
+
+    # Checking for the situation when two nodes are trying to simultaneously join the Cassandra cluster.
+    # This can actually happen only in a non-standard situation, when you are trying to start
+    # the Cassandra daemon on some EC2 nodes manually, without using the bootstrap script.
+    concurrencyError=$(cat /opt/cassandra/logs/system.log | grep "java.lang.UnsupportedOperationException: Other bootstrapping/leaving/moving nodes detected, cannot bootstrap while cassandra.consistent.rangemovement is true")
+
+    if [ -n "$concurrencyError" ] && [ "$FIRST_NODE_LOCK" != "true" ]; then
+        # Remove cluster-join lock to allow other EC2 nodes to acquire it
+        removeClusterJoinLock
+
+        echo "[WARN] Failed to concurrently start Cassandra daemon. Sleeping for extra 30sec"
+        sleep 30s
+
+        # New attempt to start Cassandra daemon
+        startCassandra
+
+        continue
+    fi
+
+    # Handling situation when Cassandra daemon process abnormally terminated
+    if [ -z "$proc" ]; then
+        # If this is the first node of Cassandra cluster just terminating with error
+        if [ "$FIRST_NODE_LOCK" == "true" ]; then
+            terminate "Failed to start Cassandra daemon"
+        fi
+
+        # Remove cluster-join lock to allow other EC2 nodes to acquire it
+        removeClusterJoinLock
+
+        echo "[WARN] Failed to start Cassandra daemon. Sleeping for extra 30sec"
+        sleep 30s
+
+        # New attempt to start Cassandra daemon
+        startCassandra
+
+        continue
+    fi
+
+    echo "[INFO] Waiting for Cassandra daemon to start, time passed ${duration}min"
+    sleep 30s
+done
+
+# Once the Cassandra daemon has successfully started, register the new Cassandra node in S3
+registerNode
+
+# Terminating script with zero exit code
+terminate
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-template.yaml b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-template.yaml
similarity index 99%
rename from modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-template.yaml
rename to modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-template.yaml
index 965e34e..e621886 100644
--- a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-template.yaml
+++ b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-template.yaml
@@ -154,13 +154,12 @@
 # will spread data evenly across them, subject to the granularity of
 # the configured compaction strategy.
 # If not set, the default directory is $CASSANDRA_HOME/data/data.
-data_file_directories:
-     - /storage/cassandra/data
+data_file_directories: ${CASSANDRA_DATA_DIR}
 
 # commit log.  when running on magnetic HDD, this should be a
 # separate spindle than the data directories.
 # If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
-commitlog_directory: /storage/cassandra/commitlog
+commitlog_directory: ${CASSANDRA_COMMITLOG_DIR}
 
 # policy for data disk failures:
 # die: shut down gossip and client transports and kill the JVM for any fs errors or
@@ -285,7 +284,7 @@
 
 # saved caches
 # If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
-saved_caches_directory: /storage/cassandra/saved_caches
+saved_caches_directory: ${CASSANDRA_CACHES_DIR}
 
 # commitlog_sync may be either "periodic" or "batch." 
 # 
@@ -757,7 +756,7 @@
 #
 # You can use a custom Snitch by setting this to the full class name
 # of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: GossipingPropertyFileSnitch
+endpoint_snitch: Ec2Snitch
 
 # controls how often to perform the more expensive part of host score
 # calculation
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/common.sh b/modules/cassandra/store/src/test/bootstrap/aws/common.sh
new file mode 100644
index 0000000..6469e95
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/common.sh
@@ -0,0 +1,1481 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Common purpose functions used by bootstrap scripts
+# -----------------------------------------------------------------------------------------------
+
+# Validates values of the main environment variables specified in env.sh
+validate()
+{
+    if [ -n "$TESTS_TYPE" ] && [ "$TESTS_TYPE" != "ignite" ] && [ "$TESTS_TYPE" != "cassandra" ]; then
+        terminate "Incorrect tests type specified: $TESTS_TYPE"
+    fi
+
+    if [ -z "$S3_TESTS_NODES_DISCOVERY" ]; then
+        terminate "Tests discovery URL doesn't specified"
+    fi
+
+    if [[ "$S3_TESTS_NODES_DISCOVERY" != */ ]]; then
+        S3_TESTS_NODES_DISCOVERY=${S3_TESTS_NODES_DISCOVERY}/
+    fi
+
+    if [ -z "$S3_TESTS_SUCCESS" ]; then
+        terminate "Tests success URL doesn't specified"
+    fi
+
+    if [[ "$S3_TESTS_SUCCESS" != */ ]]; then
+        S3_TESTS_SUCCESS=${S3_TESTS_SUCCESS}/
+    fi
+
+    if [ -z "$S3_TESTS_FAILURE" ]; then
+        terminate "Tests failure URL doesn't specified"
+    fi
+
+    if [[ "$S3_TESTS_FAILURE" != */ ]]; then
+        S3_TESTS_FAILURE=${S3_TESTS_FAILURE}/
+    fi
+
+    if [ -z "$S3_TESTS_IDLE" ]; then
+        terminate "Tests idle URL doesn't specified"
+    fi
+
+    if [[ "$S3_TESTS_IDLE" != */ ]]; then
+        S3_TESTS_IDLE=${S3_TESTS_IDLE}/
+    fi
+
+    if [ -z "$S3_TESTS_PREPARING" ]; then
+        terminate "Tests preparing URL doesn't specified"
+    fi
+
+    if [[ "$S3_TESTS_PREPARING" != */ ]]; then
+        S3_TESTS_PREPARING=${S3_TESTS_PREPARING}/
+    fi
+
+    if [ -z "$S3_TESTS_RUNNING" ]; then
+        terminate "Tests running URL doesn't specified"
+    fi
+
+    if [[ "$S3_TESTS_RUNNING" != */ ]]; then
+        S3_TESTS_RUNNING=${S3_TESTS_RUNNING}/
+    fi
+
+    if [ -z "$S3_TESTS_WAITING" ]; then
+        terminate "Tests waiting URL doesn't specified"
+    fi
+
+    if [[ "$S3_TESTS_WAITING" != */ ]]; then
+        S3_TESTS_WAITING=${S3_TESTS_WAITING}/
+    fi
+
+    if [ -z "$S3_IGNITE_NODES_DISCOVERY" ]; then
+        terminate "Ignite discovery URL doesn't specified"
+    fi
+
+    if [[ "$S3_IGNITE_NODES_DISCOVERY" != */ ]]; then
+        S3_IGNITE_NODES_DISCOVERY=${S3_IGNITE_NODES_DISCOVERY}/
+    fi
+
+    if [ -z "$S3_IGNITE_BOOTSTRAP_SUCCESS" ]; then
+        terminate "Ignite success URL doesn't specified"
+    fi
+
+    if [[ "$S3_IGNITE_BOOTSTRAP_SUCCESS" != */ ]]; then
+        S3_IGNITE_BOOTSTRAP_SUCCESS=${S3_IGNITE_BOOTSTRAP_SUCCESS}/
+    fi
+
+    if [ -z "$S3_IGNITE_BOOTSTRAP_FAILURE" ]; then
+        terminate "Ignite failure URL doesn't specified"
+    fi
+
+    if [[ "$S3_IGNITE_BOOTSTRAP_FAILURE" != */ ]]; then
+        S3_IGNITE_BOOTSTRAP_FAILURE=${S3_IGNITE_BOOTSTRAP_FAILURE}/
+    fi
+
+    if [ -z "$S3_CASSANDRA_NODES_DISCOVERY" ]; then
+        terminate "Cassandra discovery URL doesn't specified"
+    fi
+
+    if [[ "$S3_CASSANDRA_NODES_DISCOVERY" != */ ]]; then
+        S3_CASSANDRA_NODES_DISCOVERY=${S3_CASSANDRA_NODES_DISCOVERY}/
+    fi
+
+    if [ -z "$S3_CASSANDRA_BOOTSTRAP_SUCCESS" ]; then
+        terminate "Cassandra success URL doesn't specified"
+    fi
+
+    if [[ "$S3_CASSANDRA_BOOTSTRAP_SUCCESS" != */ ]]; then
+        S3_CASSANDRA_BOOTSTRAP_SUCCESS=${S3_CASSANDRA_BOOTSTRAP_SUCCESS}/
+    fi
+
+    if [ -z "$S3_CASSANDRA_BOOTSTRAP_FAILURE" ]; then
+        terminate "Cassandra failure URL doesn't specified"
+    fi
+
+    if [[ "$S3_CASSANDRA_BOOTSTRAP_FAILURE" != */ ]]; then
+        S3_CASSANDRA_BOOTSTRAP_FAILURE=${S3_CASSANDRA_BOOTSTRAP_FAILURE}/
+    fi
+
+    if [ -z "$S3_GANGLIA_MASTER_DISCOVERY" ]; then
+        terminate "Ganglia master discovery URL doesn't specified"
+    fi
+
+    if [[ "$S3_GANGLIA_MASTER_DISCOVERY" != */ ]]; then
+        S3_GANGLIA_MASTER_DISCOVERY=${S3_GANGLIA_MASTER_DISCOVERY}/
+    fi
+
+    if [ -z "$S3_GANGLIA_BOOTSTRAP_SUCCESS" ]; then
+        terminate "Ganglia master success URL doesn't specified"
+    fi
+
+    if [[ "$S3_GANGLIA_BOOTSTRAP_SUCCESS" != */ ]]; then
+        S3_GANGLIA_BOOTSTRAP_SUCCESS=${S3_GANGLIA_BOOTSTRAP_SUCCESS}/
+    fi
+
+    if [ -z "$S3_GANGLIA_BOOTSTRAP_FAILURE" ]; then
+        terminate "Ganglia master failure URL doesn't specified"
+    fi
+
+    if [[ "$S3_GANGLIA_BOOTSTRAP_FAILURE" != */ ]]; then
+        S3_GANGLIA_BOOTSTRAP_FAILURE=${S3_GANGLIA_BOOTSTRAP_FAILURE}/
+    fi
+}
+
+# Prints EC2 instance info
+printInstanceInfo()
+{
+    if [ "$NODE_TYPE" == "cassandra" ]; then
+        echo "[INFO] Cassandra download URL: $CASSANDRA_DOWNLOAD_URL"
+        echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
+        echo "[INFO] Ganglia Core download URL: $GANGLIA_CORE_DOWNLOAD_URL"
+        echo "[INFO] Ganglia Web download URL: $GANGLIA_WEB_DOWNLOAD_URL"
+        echo "[INFO] RRD download URL: $RRD_DOWNLOAD_URL"
+        echo "[INFO] Logs URL: $S3_CASSANDRA_LOGS"
+        echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER"
+        echo "[INFO] Cassandra nodes discovery URL: $S3_CASSANDRA_NODES_DISCOVERY"
+        echo "[INFO] Ganglia master discovery URL: $S3_GANGLIA_MASTER_DISCOVERY"
+        echo "[INFO] Cassandra first node lock URL: $S3_CASSANDRA_FIRST_NODE_LOCK"
+        echo "[INFO] Cassandra nodes join lock URL: $S3_CASSANDRA_NODES_JOIN_LOCK"
+        echo "[INFO] Cassandra success URL: $S3_CASSANDRA_BOOTSTRAP_SUCCESS"
+        echo "[INFO] Cassandra failure URL: $S3_CASSANDRA_BOOTSTRAP_FAILURE"
+    fi
+
+    if [ "$NODE_TYPE" == "ignite" ]; then
+        echo "[INFO] Ignite download URL: $IGNITE_DOWNLOAD_URL"
+        echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
+        echo "[INFO] Ganglia Core download URL: $GANGLIA_CORE_DOWNLOAD_URL"
+        echo "[INFO] Ganglia Web download URL: $GANGLIA_WEB_DOWNLOAD_URL"
+        echo "[INFO] RRD download URL: $RRD_DOWNLOAD_URL"
+        echo "[INFO] Logs URL: $S3_IGNITE_LOGS"
+        echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER"
+        echo "[INFO] Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
+        echo "[INFO] Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY"
+        echo "[INFO] Ganglia master discovery URL: $S3_GANGLIA_MASTER_DISCOVERY"
+        echo "[INFO] Ignite first node lock URL: $S3_IGNITE_FIRST_NODE_LOCK"
+        echo "[INFO] Ignite nodes join lock URL: $S3_IGNITE_NODES_JOIN_LOCK"
+        echo "[INFO] Ignite success URL: $S3_IGNITE_BOOTSTRAP_SUCCESS"
+        echo "[INFO] Ignite failure URL: $S3_IGNITE_BOOTSTRAP_FAILURE"
+    fi
+
+    if [ "$NODE_TYPE" == "test" ]; then
+        echo "[INFO] Tests type: $TESTS_TYPE"
+        echo "[INFO] Test nodes count: $TEST_NODES_COUNT"
+        echo "[INFO] Ignite nodes count: $IGNITE_NODES_COUNT"
+        echo "[INFO] Cassandra nodes count: $CASSANDRA_NODES_COUNT"
+        echo "[INFO] Tests summary URL: $S3_TESTS_SUMMARY"
+        echo "[INFO] ----------------------------------------------------"
+        echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
+        echo "[INFO] Ganglia Core download URL: $GANGLIA_CORE_DOWNLOAD_URL"
+        echo "[INFO] Ganglia Web download URL: $GANGLIA_WEB_DOWNLOAD_URL"
+        echo "[INFO] RRD download URL: $RRD_DOWNLOAD_URL"
+        echo "[INFO] Logs URL: $S3_TESTS_LOGS"
+        echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER"
+        echo "[INFO] Test node discovery URL: $S3_TESTS_NODES_DISCOVERY"
+        echo "[INFO] Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
+        echo "[INFO] Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY"
+        echo "[INFO] Ganglia master discovery URL: $S3_GANGLIA_MASTER_DISCOVERY"
+        echo "[INFO] Tests trigger URL: $S3_TESTS_TRIGGER"
+        echo "[INFO] Tests idle URL: $S3_TESTS_IDLE"
+        echo "[INFO] Tests preparing URL: $S3_TESTS_PREPARING"
+        echo "[INFO] Tests waiting URL: $S3_TESTS_WAITING"
+        echo "[INFO] Tests running URL: $S3_TESTS_RUNNING"
+        echo "[INFO] Tests success URL: $S3_TESTS_SUCCESS"
+        echo "[INFO] Tests failure URL: $S3_TESTS_FAILURE"
+        echo "[INFO] Ignite success URL: $S3_IGNITE_BOOTSTRAP_SUCCESS"
+        echo "[INFO] Ignite failure URL: $S3_IGNITE_BOOTSTRAP_FAILURE"
+        echo "[INFO] Cassandra success URL: $S3_CASSANDRA_BOOTSTRAP_SUCCESS"
+        echo "[INFO] Cassandra failure URL: $S3_CASSANDRA_BOOTSTRAP_FAILURE"
+    fi
+
+    if [ "$NODE_TYPE" == "ganglia" ]; then
+        echo "[INFO] Ganglia Core download URL: $GANGLIA_CORE_DOWNLOAD_URL"
+        echo "[INFO] Ganglia Web download URL: $GANGLIA_WEB_DOWNLOAD_URL"
+        echo "[INFO] RRD download URL: $RRD_DOWNLOAD_URL"
+        echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
+        echo "[INFO] Logs URL: $S3_GANGLIA_LOGS"
+        echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER"
+        echo "[INFO] Ganglia master discovery URL: $S3_GANGLIA_MASTER_DISCOVERY"
+        echo "[INFO] Ganglia success URL: $S3_GANGLIA_BOOTSTRAP_SUCCESS"
+        echo "[INFO] Ganglia failure URL: $S3_GANGLIA_BOOTSTRAP_FAILURE"
+    fi
+}
+
+# Clones git repository $1 into directory $2, retrying up to 10 times with a
+# 5sec pause between attempts. Terminates the script if all attempts fail.
+gitClone()
+{
+    echo "[INFO] Cloning git repository $1 to $2"
+
+    rm -Rf $2
+
+    # Fixed: original iterated 'for i in 0 9' (only two attempts) and tested an
+    # unset $code variable, so a successful clone was never detected.
+    for i in 0 1 2 3 4 5 6 7 8 9;
+    do
+        git clone $1 $2
+
+        if [ $? -eq 0 ]; then
+            echo "[INFO] Git repository $1 was successfully cloned to $2"
+            return 0
+        fi
+
+        echo "[WARN] Failed to clone git repository $1 from $i attempt, sleeping extra 5sec"
+        rm -Rf $2
+        sleep 5s
+    done
+
+    terminate "All 10 attempts to clone git repository $1 are failed"
+}
+
+# Applies specified tag to EC2 instance
+createTag()
+{
+    if [ -z "$EC2_INSTANCE_REGION" ]; then
+        EC2_AVAIL_ZONE=`curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone`
+        EC2_INSTANCE_REGION="`echo \"$EC2_AVAIL_ZONE\" | sed -e 's:\([0-9][0-9]*\)[a-z]*\$:\\1:'`"
+        export EC2_INSTANCE_REGION
+        echo "[INFO] EC2 instance region: $EC2_INSTANCE_REGION"
+    fi
+
+    for i in 0 9;
+    do
+        aws ec2 create-tags --resources $1 --tags Key=$2,Value=$3 --region $EC2_INSTANCE_REGION
+        if [ $? -eq 0 ]; then
+            return 0
+        fi
+
+        echo "[WARN] $i attempt to tag EC2 instance $1 with $2=$3 is failed, sleeping extra 5sec"
+        sleep 5s
+    done
+
+    terminate "All 10 attempts to tag EC2 instance $1 with $2=$3 are failed"
+}
+
+# Applies 'owner', 'project' and 'Name' tags to this EC2 instance.
+# The 'Name' tag value is chosen from the EC2_*_TAG variable matching $NODE_TYPE.
+tagInstance()
+{
+    export EC2_HOME=/opt/aws/apitools/ec2
+    export JAVA_HOME=/opt/java
+    export PATH=$JAVA_HOME/bin:$EC2_HOME/bin:$PATH
+
+    # Instance id comes from the EC2 metadata service
+    INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to get instance metadata to tag it"
+        exit 1
+    fi
+
+    case "$NODE_TYPE" in
+        cassandra) INSTANCE_NAME=$EC2_CASSANDRA_TAG;;
+        ignite)    INSTANCE_NAME=$EC2_IGNITE_TAG;;
+        test)      INSTANCE_NAME=$EC2_TEST_TAG;;
+        ganglia)   INSTANCE_NAME=$EC2_GANGLIA_TAG;;
+        *)         INSTANCE_NAME=;;
+    esac
+
+    if [ -n "$INSTANCE_NAME" ]; then
+        createTag "$INSTANCE_ID" "Name" "${INSTANCE_NAME}"
+    fi
+
+    if [ -n "$EC2_OWNER_TAG" ]; then
+        createTag "$INSTANCE_ID" "owner" "${EC2_OWNER_TAG}"
+    fi
+
+    if [ -n "$EC2_PROJECT_TAG" ]; then
+        createTag "$INSTANCE_ID" "project" "${EC2_PROJECT_TAG}"
+    fi
+}
+
+# Sets NODE_TYPE env variable
+setNodeType()
+{
+    if [ -n "$1" ]; then
+        NEW_NODE_TYPE=$NODE_TYPE
+        NODE_TYPE=$1
+    else
+        NEW_NODE_TYPE=
+    fi
+}
+
+# Reverts NODE_TYPE env variable to previous value
+revertNodeType()
+{
+    if [ -n "$NEW_NODE_TYPE" ]; then
+        NODE_TYPE=$NEW_NODE_TYPE
+        NEW_NODE_TYPE=
+    fi
+}
+
+# Echoes the local logs folder for the node type given by $1 (or the current
+# $NODE_TYPE when $1 is empty). Ganglia nodes have no logs folder.
+getLocalLogsFolder()
+{
+    setNodeType $1
+
+    case "$NODE_TYPE" in
+        cassandra) echo "/opt/cassandra/logs";;
+        ignite)    echo "/opt/ignite/work/log";;
+        test)      echo "/opt/ignite-cassandra-tests/logs";;
+        ganglia)   echo "";;
+    esac
+
+    revertNodeType
+}
+
+# Returns S3 URL to discover this node
+getDiscoveryUrl()
+{
+    setNodeType $1
+
+    if [ "$NODE_TYPE" == "cassandra" ]; then
+        echo "$S3_CASSANDRA_NODES_DISCOVERY"
+    elif [ "$NODE_TYPE" == "ignite" ]; then
+        echo "$S3_IGNITE_NODES_DISCOVERY"
+    elif [ "$NODE_TYPE" == "test" ]; then
+        echo "$S3_TESTS_NODES_DISCOVERY"
+    elif [ "$NODE_TYPE" == "ganglia" ]; then
+        echo "$S3_GANGLIA_MASTER_DISCOVERY"
+    fi
+
+    revertNodeType
+}
+
+# Echoes the S3 join-lock URL used by nodes to join the cluster sequentially.
+# Only Cassandra and Ignite clusters use join locks; other types echo nothing.
+getJoinLockUrl()
+{
+    setNodeType $1
+
+    case "$NODE_TYPE" in
+        cassandra) echo "$S3_CASSANDRA_NODES_JOIN_LOCK";;
+        ignite)    echo "$S3_IGNITE_NODES_JOIN_LOCK";;
+    esac
+
+    revertNodeType
+}
+
+# Echoes the S3 first-node-lock URL. The node that acquires this lock performs
+# the routine startup work (cleaning S3 logs/test results of previous runs).
+getFirstNodeLockUrl()
+{
+    setNodeType $1
+
+    case "$NODE_TYPE" in
+        cassandra) echo "$S3_CASSANDRA_FIRST_NODE_LOCK";;
+        ignite)    echo "$S3_IGNITE_FIRST_NODE_LOCK";;
+        test)      echo "$S3_TESTS_FIRST_NODE_LOCK";;
+    esac
+
+    revertNodeType
+}
+
+# Returns S3 success URL for the node - folder created in S3 in case node successfully started and containing node logs
+getSucessUrl()
+{
+    setNodeType $1
+
+    if [ "$NODE_TYPE" == "cassandra" ]; then
+        echo "$S3_CASSANDRA_BOOTSTRAP_SUCCESS"
+    elif [ "$NODE_TYPE" == "ignite" ]; then
+        echo "$S3_IGNITE_BOOTSTRAP_SUCCESS"
+    elif [ "$NODE_TYPE" == "test" ]; then
+        echo "$S3_TESTS_SUCCESS"
+    elif [ "$NODE_TYPE" == "ganglia" ]; then
+        echo "$S3_GANGLIA_BOOTSTRAP_SUCCESS"
+    fi
+
+    revertNodeType
+}
+
+# Echoes the S3 failure URL for the node - a folder created when the node
+# failed to start, holding its logs.
+getFailureUrl()
+{
+    setNodeType $1
+
+    case "$NODE_TYPE" in
+        cassandra) echo "$S3_CASSANDRA_BOOTSTRAP_FAILURE";;
+        ignite)    echo "$S3_IGNITE_BOOTSTRAP_FAILURE";;
+        test)      echo "$S3_TESTS_FAILURE";;
+        ganglia)   echo "$S3_GANGLIA_BOOTSTRAP_FAILURE";;
+    esac
+
+    revertNodeType
+}
+
+# Terminates script execution, unregisters node and removes all the locks (join lock, first node lock) created by it.
+# $1 - optional error message: when present the node is reported as failed and the
+# script exits with code 1; when absent the node is reported as successfully
+# started and the script exits with code 0. In both cases node logs and the
+# start result are uploaded to the matching success/failure folder in S3.
+terminate()
+{
+    SUCCESS_URL=$(getSucessUrl)
+    FAILURE_URL=$(getFailureUrl)
+
+    # Normalize both report URLs to end with '/'
+    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
+        SUCCESS_URL=${SUCCESS_URL}/
+    fi
+
+    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
+        FAILURE_URL=${FAILURE_URL}/
+    fi
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    # Default report content is the hostname; overridden by the error message below
+    msg=$HOST_NAME
+
+    if [ -n "$1" ]; then
+        echo "[ERROR] $1"
+        echo "[ERROR]-----------------------------------------------------"
+        echo "[ERROR] Failed to start $NODE_TYPE node"
+        echo "[ERROR]-----------------------------------------------------"
+        msg=$1
+        reportFolder=${FAILURE_URL}${HOST_NAME}
+        reportFile=$reportFolder/__error__
+    else
+        echo "[INFO]-----------------------------------------------------"
+        echo "[INFO] $NODE_TYPE node successfully started"
+        echo "[INFO]-----------------------------------------------------"
+        reportFolder=${SUCCESS_URL}${HOST_NAME}
+        reportFile=$reportFolder/__success__
+    fi
+
+    # Persist the start result locally before uploading it to S3
+    echo $msg > /opt/ignite-cassandra-tests/bootstrap/start_result
+
+    # Drop any report folder left over from a previous run (best effort)
+    aws s3 rm --recursive $reportFolder
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to drop report folder: $reportFolder"
+    fi
+
+    localLogs=$(getLocalLogsFolder)
+
+    # Export node logs (if the node type has a logs folder) to the report folder
+    if [ -d "$localLogs" ]; then
+        aws s3 sync --sse AES256 $localLogs $reportFolder
+        if [ $? -ne 0 ]; then
+            echo "[ERROR] Failed to export $NODE_TYPE logs to: $reportFolder"
+        fi
+    fi
+
+    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/bootstrap/start_result $reportFile
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to export node start result to: $reportFile"
+    fi
+
+    # Remove local scratch files used by the lock protocol
+    rm -f /opt/ignite-cassandra-tests/bootstrap/start_result /opt/ignite-cassandra-tests/bootstrap/join-lock /opt/ignite-cassandra-tests/bootstrap/first-node-lock
+
+    removeClusterJoinLock
+
+    # Test nodes additionally clean their state markers and unregister themselves
+    if [ "$NODE_TYPE" == "test" ]; then
+        aws s3 rm ${S3_TESTS_RUNNING}${HOST_NAME}
+        aws s3 rm ${S3_TESTS_WAITING}${HOST_NAME}
+        aws s3 rm ${S3_TESTS_IDLE}${HOST_NAME}
+        aws s3 rm ${S3_TESTS_PREPARING}${HOST_NAME}
+        unregisterNode
+    fi
+
+    # On failure also release the first node lock and unregister before exiting
+    if [ -n "$1" ]; then
+        removeFirstNodeLock
+        unregisterNode
+        exit 1
+    fi
+
+    exit 0
+}
+
+# Registers node by creating a file having node hostname inside specific folder in S3
+registerNode()
+{
+    DISCOVERY_URL=$(getDiscoveryUrl)
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    echo "[INFO] Registering $NODE_TYPE node: ${DISCOVERY_URL}${HOST_NAME}"
+
+    aws s3 cp --sse AES256 /etc/hosts ${DISCOVERY_URL}${HOST_NAME}
+    if [ $? -ne 0 ]; then
+        terminate "Failed to register $NODE_TYPE node info in: ${DISCOVERY_URL}${HOST_NAME}"
+    fi
+
+    echo "[INFO] $NODE_TYPE node successfully registered"
+}
+
+# Unregisters node by removing a file having node hostname inside specific folder in S3
+unregisterNode()
+{
+    DISCOVERY_URL=$(getDiscoveryUrl)
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    echo "[INFO] Removing $NODE_TYPE node registration from: ${DISCOVERY_URL}${HOST_NAME}"
+
+    exists=$(aws s3 ls ${DISCOVERY_URL}${HOST_NAME})
+
+    if [ -n "$exists" ]; then
+        aws s3 rm ${DISCOVERY_URL}${HOST_NAME}
+
+        if [ $? -ne 0 ]; then
+            echo "[ERROR] Failed to remove $NODE_TYPE node registration"
+        else
+            echo "[INFO] $NODE_TYPE node registration removed"
+        fi
+    else
+        echo "[INFO] Node registration actually haven't been previously created"
+    fi
+}
+
+# Cleans up all nodes metadata for particular cluster (Cassandra, Ignite, Tests). Performed only by the node acquired
+# first node lock.
+# Removes the join lock plus the whole discovery/success/failure folders in S3.
+cleanupMetadata()
+{
+    DISCOVERY_URL=$(getDiscoveryUrl)
+    JOIN_LOCK_URL=$(getJoinLockUrl)
+    SUCCESS_URL=$(getSucessUrl)
+    FAILURE_URL=$(getFailureUrl)
+
+    echo "[INFO] Running metadata cleanup"
+
+    # Best effort: failures of individual removals are not treated as fatal
+    aws s3 rm $JOIN_LOCK_URL
+    aws s3 rm --recursive $DISCOVERY_URL
+    aws s3 rm --recursive $SUCCESS_URL
+    aws s3 rm --recursive $FAILURE_URL
+
+    echo "[INFO] Metadata cleanup completed"
+}
+
+# Tries to get first node lock for the node. Only one (first) node can have such lock and it will be responsible for
+# cleanup process when starting cluster.
+# Returns 0 when this node holds the lock (and sets FIRST_NODE_LOCK="true"), 1 otherwise.
+tryToGetFirstNodeLock()
+{
+    # Already acquired earlier - nothing to do
+    if [ "$FIRST_NODE_LOCK" == "true" ]; then
+        return 0
+    fi
+
+    FIRST_NODE_LOCK_URL=$(getFirstNodeLockUrl)
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    echo "[INFO] Trying to get first node lock: $FIRST_NODE_LOCK_URL"
+
+    checkFirstNodeLockExist $FIRST_NODE_LOCK_URL
+    if [ $? -ne 0 ]; then
+        return 1
+    fi
+
+    # Publish this host as the candidate lock owner
+    echo "$HOST_NAME" > /opt/ignite-cassandra-tests/bootstrap/first-node-lock
+
+    createFirstNodeLock $FIRST_NODE_LOCK_URL
+
+    # Give competing nodes a chance to overwrite the lock, then re-read it
+    # from S3 to find out which node actually won the race
+    sleep 5s
+
+    rm -Rf /opt/ignite-cassandra-tests/bootstrap/first-node-lock
+
+    aws s3 cp $FIRST_NODE_LOCK_URL /opt/ignite-cassandra-tests/bootstrap/first-node-lock
+    if [ $? -ne 0 ]; then
+        echo "[WARN] Failed to check just created first node lock"
+        return 1
+    fi
+
+    first_host=$(cat /opt/ignite-cassandra-tests/bootstrap/first-node-lock)
+
+    rm -f /opt/ignite-cassandra-tests/bootstrap/first-node-lock
+
+    # Another node overwrote the lock after we created it - we lost the race
+    if [ "$first_host" != "$HOST_NAME" ]; then
+        echo "[INFO] Node $first_host has discarded previously created first node lock"
+        return 1
+    fi
+
+    echo "[INFO] Congratulations, got first node lock"
+
+    FIRST_NODE_LOCK="true"
+
+    return 0
+}
+
+# Checks whether the first node lock at S3 URL $1 already exists.
+# Returns 1 when the lock exists, 0 otherwise.
+checkFirstNodeLockExist()
+{
+    echo "[INFO] Checking for the first node lock: $1"
+
+    if [ -n "$(aws s3 ls $1)" ]; then
+        echo "[INFO] First node lock already exists"
+        return 1
+    fi
+
+    echo "[INFO] First node lock doesn't exist"
+
+    return 0
+}
+
+# Uploads the locally prepared first-node-lock file to S3 URL $1.
+# Terminates the script on upload failure.
+createFirstNodeLock()
+{
+    if ! aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/bootstrap/first-node-lock $1; then
+        terminate "Failed to create first node lock: $1"
+    fi
+
+    echo "[INFO] Created first node lock: $1"
+}
+
+# Removes first node lock from S3
+removeFirstNodeLock()
+{
+    if [ "$FIRST_NODE_LOCK" != "true" ]; then
+        return 0
+    fi
+
+    FIRST_NODE_LOCK_URL=$(getFirstNodeLockUrl)
+
+    echo "[INFO] Removing first node lock: $FIRST_NODE_LOCK_URL"
+
+    aws s3 rm $FIRST_NODE_LOCK_URL
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to remove first node lock: $FIRST_NODE_LOCK_URL"
+    fi
+
+    echo "[INFO] Removed first node lock: $FIRST_NODE_LOCK_URL"
+
+    FIRST_NODE_LOCK="false"
+}
+
+# Tries to get cluster join lock. Nodes use this lock to join a cluster sequentially.
+tryToGetClusterJoinLock()
+{
+    if [ "$JOIN_LOCK" == "true" ]; then
+        return 0
+    fi
+
+    JOIN_LOCK_URL=$(getJoinLockUrl)
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    echo "[INFO] Trying to get cluster join lock"
+
+    checkClusterJoinLockExist $JOIN_LOCK_URL
+    if [ $? -ne 0 ]; then
+        return 1
+    fi
+
+    echo "$HOST_NAME" > /opt/ignite-cassandra-tests/bootstrap/join-lock
+
+    createClusterJoinLock $JOIN_LOCK_URL
+
+    sleep 5s
+
+    rm -Rf /opt/ignite-cassandra-tests/bootstrap/join-lock
+
+    aws s3 cp $JOIN_LOCK_URL /opt/ignite-cassandra-tests/bootstrap/join-lock
+    if [ $? -ne 0 ]; then
+        echo "[WARN] Failed to check just created cluster join lock"
+        return 1
+    fi
+
+    join_host=$(cat /opt/ignite-cassandra-tests/bootstrap/join-lock)
+
+    if [ "$join_host" != "$HOST_NAME" ]; then
+        echo "[INFO] Node $first_host has discarded previously created cluster join lock"
+        return 1
+    fi
+
+    echo "[INFO] Congratulations, got cluster join lock"
+
+    JOIN_LOCK="true"
+
+    return 0
+}
+
+# Checks if join lock already exists in S3
+checkClusterJoinLockExist()
+{
+    echo "[INFO] Checking for the cluster join lock: $1"
+
+    lockExists=$(aws s3 ls $1)
+    if [ -n "$lockExists" ]; then
+        echo "[INFO] Cluster join lock already exists"
+        return 1
+    fi
+
+    if [ "$NODE_TYPE" == "cassandra" ]; then
+        status=$(/opt/cassandra/bin/nodetool -h $CASSANDRA_SEED status)
+        leaving=$(echo $status | grep UL)
+        moving=$(echo $status | grep UM)
+        joining=$(echo $status | grep UJ)
+
+        if [ -n "$leaving" ] || [ -n "$moving" ] || [ -n "$joining" ]; then
+            echo "[INFO] Cluster join lock doesn't exist in S3, but some node still trying to join Cassandra cluster"
+            return 1
+        fi
+    fi
+
+    echo "[INFO] Cluster join lock doesn't exist"
+}
+
+# Uploads the locally prepared join-lock file to S3 URL $1.
+# Terminates the script on upload failure.
+createClusterJoinLock()
+{
+    if ! aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/bootstrap/join-lock $1; then
+        terminate "Failed to create cluster join lock: $1"
+    fi
+
+    echo "[INFO] Created cluster join lock: $1"
+}
+
+# Removes join lock
+removeClusterJoinLock()
+{
+    if [ "$JOIN_LOCK" != "true" ]; then
+        return 0
+    fi
+
+    JOIN_LOCK_URL=$(getJoinLockUrl)
+
+    echo "[INFO] Removing cluster join lock: $JOIN_LOCK_URL"
+
+    aws s3 rm $JOIN_LOCK_URL
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to remove cluster join lock: $JOIN_LOCK_URL"
+    fi
+
+    JOIN_LOCK="false"
+
+    echo "[INFO] Removed cluster join lock: $JOIN_LOCK_URL"
+}
+
+# Blocks until this node acquires the cluster join lock, retrying every 30sec.
+# Nodes use this to join the cluster one at a time (a Cassandra limitation).
+waitToJoinCluster()
+{
+    echo "[INFO] Waiting to join $NODE_TYPE cluster"
+
+    until tryToGetClusterJoinLock; do
+        echo "[INFO] Another node is trying to join cluster. Waiting for extra 30sec."
+        sleep 30s
+    done
+
+    echo "[INFO]-------------------------------------------------------------"
+    echo "[INFO] Congratulations, got lock to join $NODE_TYPE cluster"
+    echo "[INFO]-------------------------------------------------------------"
+}
+
+# Wait for the cluster to register at least one node in S3, so that all other nodes will use already existing nodes
+# to send them info about them and join the cluster.
+# $1 - cluster type: "cassandra", "ignite" or "test".
+# $2 - when "true", terminates after SERVICE_STARTUP_TIME minutes without seeds.
+# On success sets CLUSTER_SEEDS to up to three distinct registered hostnames.
+setupClusterSeeds()
+{
+    if [ "$1" != "cassandra" ] && [ "$1" != "ignite" ] && [ "$1" != "test" ]; then
+        terminate "Incorrect cluster type specified '$1' to setup seeds"
+    fi
+
+    DISCOVERY_URL=$(getDiscoveryUrl $1)
+
+    echo "[INFO] Setting up $1 seeds"
+
+    echo "[INFO] Looking for $1 seeds in: $DISCOVERY_URL"
+
+    startTime=$(date +%s)
+
+    while true; do
+        # List registered hostnames, stripping listing prefixes/columns
+        seeds=$(aws s3 ls $DISCOVERY_URL | grep -v PRE | sed -r "s/^.* //g")
+        if [ -n "$seeds" ]; then
+            seeds=($seeds)
+            length=${#seeds[@]}
+
+            # Few nodes: take the first (up to three) entries; many nodes: pick
+            # three at random (duplicates are filtered out below)
+            if [ $length -lt 4 ]; then
+                seed1=${seeds[0]}
+                seed2=${seeds[1]}
+                seed3=${seeds[2]}
+            else
+                pos1=$(($RANDOM%$length))
+                pos2=$(($RANDOM%$length))
+                pos3=$(($RANDOM%$length))
+                seed1=${seeds[${pos1}]}
+                seed2=${seeds[${pos2}]}
+                seed3=${seeds[${pos3}]}
+            fi
+
+            CLUSTER_SEEDS=$seed1
+
+            if [ "$seed2" != "$seed1" ] && [ -n "$seed2" ]; then
+                CLUSTER_SEEDS="$CLUSTER_SEEDS $seed2"
+            fi
+
+            if [ "$seed3" != "$seed2" ] && [ "$seed3" != "$seed1" ] && [ -n "$seed3" ]; then
+                CLUSTER_SEEDS="$CLUSTER_SEEDS $seed3"
+            fi
+
+            echo "[INFO] Using $1 seeds: $CLUSTER_SEEDS"
+
+            return 0
+        fi
+
+        currentTime=$(date +%s)
+        duration=$(( $currentTime-$startTime ))
+        duration=$(( $duration/60 ))
+
+        # Optional startup timeout, enabled by $2 == "true"
+        if [ "$2" == "true" ]; then
+            if [ $duration -gt $SERVICE_STARTUP_TIME ]; then
+                terminate "${SERVICE_STARTUP_TIME}min timeout expired, but first $1 node is still not up and running"
+            fi
+        fi
+
+        echo "[INFO] Waiting for the first $1 node to start and publish its seed, time passed ${duration}min"
+
+        sleep 30s
+    done
+}
+
+# Waits until the first cluster node of the current $NODE_TYPE registers in S3.
+# $1 - when "true", terminates after SERVICE_STARTUP_TIME minutes of waiting.
+waitFirstClusterNodeRegistered()
+{
+    DISCOVERY_URL=$(getDiscoveryUrl)
+
+    echo "[INFO] Waiting for the first $NODE_TYPE node to register in: $DISCOVERY_URL"
+
+    startTime=$(date +%s)
+
+    while true; do
+        exists=$(aws s3 ls $DISCOVERY_URL)
+        if [ -n "$exists" ]; then
+            break
+        fi
+
+        if [ "$1" == "true" ]; then
+            currentTime=$(date +%s)
+            duration=$(( $currentTime-$startTime ))
+            duration=$(( $duration/60 ))
+
+            if [ $duration -gt $SERVICE_STARTUP_TIME ]; then
+                # Fixed: original message interpolated an undefined $type variable
+                terminate "${SERVICE_STARTUP_TIME}min timeout expired, but first $NODE_TYPE node is still not up and running"
+            fi
+        fi
+
+        echo "[INFO] Waiting extra 30sec"
+
+        sleep 30s
+    done
+
+    echo "[INFO] First $NODE_TYPE node registered"
+}
+
+# Waits until all cluster nodes successfully bootstrapped. In case of Tests cluster also waits until all nodes
+# switch to waiting state.
+# $1 - cluster type: "cassandra", "ignite" or "test". Returns immediately when
+# the configured node count for that cluster is zero.
+waitAllClusterNodesReady()
+{
+    if [ "$1" == "cassandra" ]; then
+        NODES_COUNT=$CASSANDRA_NODES_COUNT
+    elif [ "$1" == "ignite" ]; then
+        NODES_COUNT=$IGNITE_NODES_COUNT
+    elif [ "$1" == "test" ]; then
+        NODES_COUNT=$TEST_NODES_COUNT
+    else
+        terminate "Incorrect cluster type specified '$1' to wait for all nodes up and running"
+    fi
+
+    SUCCESS_URL=$(getSucessUrl $1)
+
+    if [ $NODES_COUNT -eq 0 ]; then
+        return 0
+    fi
+
+    echo "[INFO] Waiting for all $NODES_COUNT $1 nodes ready"
+
+    while true; do
+        # Test nodes are 'ready' when waiting or already running; other node
+        # types are counted via their success markers in S3
+        if [ "$1" == "test" ]; then
+            count1=$(aws s3 ls $S3_TESTS_WAITING | wc -l)
+            count2=$(aws s3 ls $S3_TESTS_RUNNING | wc -l)
+            count=$(( $count1+$count2 ))
+        else
+            count=$(aws s3 ls $SUCCESS_URL | wc -l)
+        fi
+
+        if [ $count -ge $NODES_COUNT ]; then
+            break
+        fi
+
+        echo "[INFO] Waiting extra 30sec"
+
+        sleep 30s
+    done
+
+    # Extra settle time after the last node reports ready
+    sleep 30s
+
+    echo "[INFO] Congratulation, all $NODES_COUNT $1 nodes are ready"
+}
+
+# Wait untill all Tests cluster nodes completed their tests execution
+waitAllTestNodesCompletedTests()
+{
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    echo "[INFO] Waiting for all $TEST_NODES_COUNT test nodes to complete their tests"
+
+    while true; do
+
+        count=$(aws s3 ls $S3_TESTS_RUNNING | grep -v $HOST_NAME | wc -l)
+
+        if [ $count -eq 0 ]; then
+            break
+        fi
+
+        echo "[INFO] Waiting extra 30sec"
+
+        sleep 30s
+    done
+
+    echo "[INFO] Congratulation, all $TEST_NODES_COUNT test nodes have completed their tests"
+}
+
+# Installs all required Ganglia packages
+installGangliaPackages()
+{
+    # Installs Ganglia and its dependencies (gperf, rrdtool, ganglia-core)
+    # from source.
+    # $1 - role: "master" additionally installs httpd/PHP and ganglia-web;
+    #      any other value (or no argument) performs the agent-only install.
+    # Relies on downloadPackage, gitClone, terminate and the *_DOWNLOAD_URL
+    # settings from env.sh.
+    if [ "$1" == "master" ]; then
+        echo "[INFO] Installing Ganglia master required packages"
+    else
+        echo "[INFO] Installing Ganglia agent required packages"
+    fi
+
+    # Non-empty only on Amazon Linux; used below to branch between Amazon
+    # Linux and other RHEL-like distros (which need SELinux off and EPEL).
+    isAmazonLinux=$(cat "/etc/issue" | grep "Amazon Linux")
+
+    if [ -z "$isAmazonLinux" ]; then
+        setenforce 0
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to turn off SELinux"
+        fi
+
+        downloadPackage "$EPEL_DOWNLOAD_URL" "/opt/epel.rpm" "EPEL"
+
+        rpm -Uvh /opt/epel.rpm
+        if [ $? -ne 0 ]; then
+            terminate "Failed to setup EPEL repository"
+        fi
+
+        rm -f /opt/epel.rpm
+    fi
+
+    yum -y install apr-devel apr-util check-devel cairo-devel pango-devel pango \
+    libxml2-devel glib2-devel dbus-devel freetype-devel freetype \
+    libpng-devel libart_lgpl-devel fontconfig-devel gcc-c++ expat-devel \
+    python-devel libXrender-devel perl-devel perl-CPAN gettext git sysstat \
+    automake autoconf ltmain.sh pkg-config gperf libtool pcre-devel libconfuse-devel
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install all Ganglia required packages"
+    fi
+
+    if [ "$1" == "master" ]; then
+        yum -y install httpd php php-devel php-pear
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to install all Ganglia required packages"
+        fi
+
+        if [ -z "$isAmazonLinux" ]; then
+            yum -y install liberation-sans-fonts
+
+            if [ $? -ne 0 ]; then
+                terminate "Failed to install liberation-sans-fonts package"
+            fi
+        fi
+    fi
+
+    # Build and install gperf from source (non Amazon Linux only).
+    if [ -z "$isAmazonLinux" ]; then
+        downloadPackage "$GPERF_DOWNLOAD_URL" "/opt/gperf.tar.gz" "gperf"
+
+        tar -xvzf /opt/gperf.tar.gz -C /opt
+        if [ $? -ne 0 ]; then
+            terminate "Failed to untar gperf tarball"
+        fi
+
+        rm -Rf /opt/gperf.tar.gz
+
+        unzipDir=$(ls /opt | grep "gperf")
+
+        # NOTE(review): this checks the 'ls | grep' lookup above, but the
+        # message talks about updating file creation dates - looks copied
+        # from elsewhere; confirm the intended check.
+        if [ $? -ne 0 ]; then
+            terminate "Failed to update creation date to current for all files inside: /opt/$unzipDir"
+        fi
+
+        pushd /opt/$unzipDir
+
+        # Neutralize configure's '$2 = conftest.file' timestamp sanity check,
+        # presumably to avoid clock-skew failures on fresh instances - TODO confirm.
+        cat ./configure | sed -r "s/test \"\\\$2\" = conftest.file/test 1 = 1/g" > ./configure1
+        rm ./configure
+        mv ./configure1 ./configure
+        chmod a+x ./configure
+
+        ./configure
+        if [ $? -ne 0 ]; then
+            terminate "Failed to configure gperf"
+        fi
+
+        make
+        if [ $? -ne 0 ]; then
+            terminate "Failed to make gperf"
+        fi
+
+        make install
+        if [ $? -ne 0 ]; then
+            terminate "Failed to install gperf"
+        fi
+
+        echo "[INFO] gperf tool successfully installed"
+
+        popd
+    fi
+
+    echo "[INFO] Installing rrdtool"
+
+    downloadPackage "$RRD_DOWNLOAD_URL" "/opt/rrdtool.tar.gz" "rrdtool"
+
+    tar -xvzf /opt/rrdtool.tar.gz -C /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to untar rrdtool tarball"
+    fi
+
+    rm -Rf /opt/rrdtool.tar.gz
+
+    # Normalize the versioned unpack dir (e.g. rrdtool-1.3.1) to /opt/rrdtool.
+    unzipDir=$(ls /opt | grep "rrdtool")
+    if [ "$unzipDir" != "rrdtool" ]; then
+        mv /opt/$unzipDir /opt/rrdtool
+    fi
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to update creation date to current for all files inside: /opt/rrdtool"
+    fi
+
+    export PKG_CONFIG_PATH=/usr/lib/pkgconfig/
+
+    pushd /opt/rrdtool
+
+    # Same configure timestamp-check neutralization as for gperf above.
+    cat ./configure | sed -r "s/test \"\\\$2\" = conftest.file/test 1 = 1/g" > ./configure1
+    rm ./configure
+    mv ./configure1 ./configure
+    chmod a+x ./configure
+
+    ./configure --prefix=/usr/local/rrdtool
+    if [ $? -ne 0 ]; then
+        terminate "Failed to configure rrdtool"
+    fi
+
+    make
+    if [ $? -ne 0 ]; then
+        terminate "Failed to make rrdtool"
+    fi
+
+    make install
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install rrdtool"
+    fi
+
+    ln -s /usr/local/rrdtool/bin/rrdtool /usr/bin/rrdtool
+    mkdir -p /var/lib/ganglia/rrds
+
+    # Ownership matches the 'nobody' user gmetad is configured to run as
+    # (see setuid_username in the generated gmetad.conf).
+    chown -R nobody:nobody /usr/local/rrdtool /var/lib/ganglia/rrds /usr/bin/rrdtool
+
+    rm -Rf /opt/rrdtool
+
+    popd
+
+    echo "[INFO] rrdtool successfully installed"
+
+    echo "[INFO] Installig ganglia-core"
+
+    gitClone $GANGLIA_CORE_DOWNLOAD_URL /opt/monitor-core
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to update creation date to current for all files inside: /opt/monitor-core"
+    fi
+
+    pushd /opt/monitor-core
+
+    # Pin to a known-good ganglia-core commit.
+    git checkout efe9b5e5712ea74c04e3b15a06eb21900e18db40
+
+    ./bootstrap
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to prepare ganglia-core for compilation"
+    fi
+
+    cat ./configure | sed -r "s/test \"\\\$2\" = conftest.file/test 1 = 1/g" > ./configure1
+    rm ./configure
+    mv ./configure1 ./configure
+    chmod a+x ./configure
+
+    ./configure --with-gmetad --with-librrd=/usr/local/rrdtool
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to configure ganglia-core"
+    fi
+
+    make
+    if [ $? -ne 0 ]; then
+        terminate "Failed to make ganglia-core"
+    fi
+
+    make install
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install ganglia-core"
+    fi
+
+    rm -Rf /opt/monitor-core
+
+    popd
+
+    echo "[INFO] ganglia-core successfully installed"
+
+    # Agents are done at this point; the rest is master-only (ganglia-web).
+    if [ "$1" != "master" ]; then
+        return 0
+    fi
+
+    echo "[INFO] Installing ganglia-web"
+
+    gitClone $GANGLIA_WEB_DOWNLOAD_URL /opt/web
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to update creation date to current for all files inside: /opt/web"
+    fi
+
+    # Patch the Makefile: install everything under /opt/ganglia-web and run
+    # under the 'apache' user instead of Debian's 'www-data'.
+    cat /opt/web/Makefile | sed -r "s/GDESTDIR = \/usr\/share\/ganglia-webfrontend/GDESTDIR = \/opt\/ganglia-web/g" > /opt/web/Makefile1
+    cat /opt/web/Makefile1 | sed -r "s/GCONFDIR = \/etc\/ganglia-web/GCONFDIR = \/opt\/ganglia-web/g" > /opt/web/Makefile2
+    cat /opt/web/Makefile2 | sed -r "s/GWEB_STATEDIR = \/var\/lib\/ganglia-web/GWEB_STATEDIR = \/opt\/ganglia-web/g" > /opt/web/Makefile3
+    cat /opt/web/Makefile3 | sed -r "s/APACHE_USER = www-data/APACHE_USER = apache/g" > /opt/web/Makefile4
+
+    rm -f /opt/web/Makefile
+    cp /opt/web/Makefile4 /opt/web/Makefile
+    rm -f /opt/web/Makefile1 /opt/web/Makefile2 /opt/web/Makefile3 /opt/web/Makefile4
+
+    pushd /opt/web
+
+    # Pin to a known-good ganglia-web commit.
+    git checkout f2b19c7cacfc8c51921be801b92f8ed0bd4901ae
+
+    make
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to make ganglia-web"
+    fi
+
+    make install
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install ganglia-web"
+    fi
+
+    rm -Rf /opt/web
+
+    popd
+
+    # Expose the dashboard at http://<host>/ganglia. 'Require all granted'
+    # is added on non Amazon Linux only - presumably for Apache 2.4; confirm.
+    echo "" >> /etc/httpd/conf/httpd.conf
+    echo "Alias /ganglia /opt/ganglia-web" >> /etc/httpd/conf/httpd.conf
+    echo "<Directory \"/opt/ganglia-web\">" >> /etc/httpd/conf/httpd.conf
+    echo "       AllowOverride All" >> /etc/httpd/conf/httpd.conf
+    echo "       Order allow,deny" >> /etc/httpd/conf/httpd.conf
+
+    if [ -z "$isAmazonLinux" ]; then
+        echo "       Require all granted" >> /etc/httpd/conf/httpd.conf
+    fi
+
+    echo "       Allow from all" >> /etc/httpd/conf/httpd.conf
+    echo "       Deny from none" >> /etc/httpd/conf/httpd.conf
+    echo "</Directory>" >> /etc/httpd/conf/httpd.conf
+
+    echo "[INFO] ganglia-web successfully installed"
+}
+
+# Setup ntpd service
+setupNTP()
+{
+    # Installs and (re)starts the ntpd service so cluster node clocks stay
+    # in sync; terminates bootstrap on failure.
+    echo "[INFO] Installing ntp package"
+
+    yum -y install ntp
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install ntp package"
+    fi
+
+    echo "[INFO] Starting ntpd service"
+
+    service ntpd restart
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to restart ntpd service"
+    fi
+}
+
+# Installs and runs the Ganglia agent ('gmond' daemon)
+bootstrapGangliaAgent()
+{
+    # $1, $2 are forwarded to agent-start.sh - cluster name and gmond port,
+    # judging by how that script uses them; confirm against agent-start.sh.
+    echo "[INFO]-----------------------------------------------------------------"
+    echo "[INFO] Bootstrapping Ganglia agent"
+    echo "[INFO]-----------------------------------------------------------------"
+
+    # Called without arguments: installs the agent-only package set.
+    installGangliaPackages
+
+    echo "[INFO] Running ganglia agent daemon to discover Ganglia master"
+
+    # Runs in the background; master discovery and gmond startup happen
+    # asynchronously while bootstrap continues.
+    /opt/ignite-cassandra-tests/bootstrap/aws/ganglia/agent-start.sh $1 $2 > /opt/ganglia-agent.log &
+
+    echo "[INFO] Ganglia daemon job id: $!"
+}
+
+# Partitioning, formatting to ext4 and mounting all unpartitioned drives.
+# As a result env array MOUNT_POINTS provides all newly created mount points.
+mountUnpartitionedDrives()
+{
+    # Partitions, formats (ext4) and mounts every drive that has no
+    # partition yet. Result: env array MOUNT_POINTS lists each newly
+    # created /storageN mount point (empty when all drives are partitioned).
+    MOUNT_POINTS=
+
+    echo "[INFO] Mounting unpartitioned drives"
+
+    # Make sure lsblk and parted are available (missing on some minimal AMIs).
+    lsblk -V &> /dev/null
+
+    if [ $? -ne 0 ]; then
+        echo "[WARN] lsblk utility doesn't exist"
+        echo "[INFO] Installing util-linux-ng package"
+
+        yum -y install util-linux-ng
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to install util-linux-ng package"
+        fi
+    fi
+
+    parted -v &> /dev/null
+
+    if [ $? -ne 0 ]; then
+        echo "[WARN] parted utility doesn't exist"
+        echo "[INFO] Installing parted package"
+
+        yum -y install parted
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to install parted package"
+        fi
+    fi
+
+    drives=$(lsblk -io KNAME,TYPE | grep disk | sed -r "s/disk//g" | xargs)
+
+    echo "[INFO] Found HDDs: $drives"
+
+    # Drives that already have at least one partition: stripping the digits
+    # from partition names (xvda1 -> xvda) yields the parent drive names.
+    unpartDrives=
+    partDrives=$(lsblk -io KNAME,TYPE | grep part | sed -r "s/[0-9]*//g" | sed -r "s/part//g" | xargs)
+
+    drives=($drives)
+	count=${#drives[@]}
+	iter=1
+
+	for (( i=0; i<=$(( $count -1 )); i++ ))
+	do
+		drive=${drives[$i]}
+
+        if [ -z "$drive" ]; then
+            continue
+        fi
+
+        isPartitioned=$(echo $partDrives | grep "$drive")
+
+        if [ -n "$isPartitioned" ]; then
+            continue
+        fi
+
+        echo "[INFO] Creating partition for the drive: $drive"
+
+        # Single GPT partition covering the whole drive.
+        parted -s -a opt /dev/$drive mklabel gpt mkpart primary 0% 100%
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create partition for the drive: $drive"
+        fi
+
+        partition=$(lsblk -io KNAME,TYPE | grep part | grep $drive | sed -r "s/part//g" | xargs)
+
+        echo "[INFO] Successfully created partition $partition for the drive: $drive"
+
+        echo "[INFO] Formatting partition /dev/$partition to ext4"
+
+        mkfs.ext4 -F -q /dev/$partition
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to format partition: /dev/$partition"
+        fi
+
+        echo "[INFO] Partition /dev/$partition was successfully formatted to ext4"
+
+        echo "[INFO] Mounting partition /dev/$partition to /storage$iter"
+
+        mkdir -p /storage$iter
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create mount point directory: /storage$iter"
+        fi
+
+        # Persist the mount in fstab, then mount it right away.
+        echo "/dev/$partition               /storage$iter               ext4    defaults        1 1" >> /etc/fstab
+
+        mount /storage$iter
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to mount /storage$iter mount point for partition /dev/$partition"
+        fi
+
+        echo "[INFO] Partition /dev/$partition was successfully mounted to /storage$iter"
+
+        if [ -n "$MOUNT_POINTS" ]; then
+            MOUNT_POINTS="$MOUNT_POINTS "
+        fi
+
+        MOUNT_POINTS="${MOUNT_POINTS}/storage${iter}"
+
+        iter=$(($iter+1))
+    done
+
+    if [ -z "$MOUNT_POINTS" ]; then
+        echo "[INFO] All drives already have partitions created"
+    fi
+
+    MOUNT_POINTS=($MOUNT_POINTS)
+}
+
+# Creates storage directories for Cassandra: data files, commit log, saved caches.
+# As a result CASSANDRA_DATA_DIR, CASSANDRA_COMMITLOG_DIR, CASSANDRA_CACHES_DIR will point to appropriate directories.
+createCassandraStorageLayout()
+{
+    # Distributes the Cassandra data/commitlog/saved_caches directories over
+    # the mount points discovered by mountUnpartitionedDrives; any extra
+    # mount points become additional data directories. Falls back to
+    # /storage/cassandra/* when no dedicated drives exist.
+    CASSANDRA_DATA_DIR=
+    CASSANDRA_COMMITLOG_DIR=
+    CASSANDRA_CACHES_DIR=
+
+    mountUnpartitionedDrives
+
+    echo "[INFO] Creating Cassandra storage layout"
+
+	count=${#MOUNT_POINTS[@]}
+
+	for (( i=0; i<=$(( $count -1 )); i++ ))
+    do
+        mountPoint=${MOUNT_POINTS[$i]}
+
+        if [ -z "$CASSANDRA_DATA_DIR" ]; then
+            CASSANDRA_DATA_DIR=$mountPoint
+        elif [ -z "$CASSANDRA_COMMITLOG_DIR" ]; then
+            CASSANDRA_COMMITLOG_DIR=$mountPoint
+        elif [ -z "$CASSANDRA_CACHES_DIR" ]; then
+            CASSANDRA_CACHES_DIR=$mountPoint
+        else
+            CASSANDRA_DATA_DIR="$CASSANDRA_DATA_DIR $mountPoint"
+        fi
+    done
+
+    if [ -z "$CASSANDRA_DATA_DIR" ]; then
+        CASSANDRA_DATA_DIR="/storage/cassandra/data"
+    else
+        CASSANDRA_DATA_DIR="$CASSANDRA_DATA_DIR/cassandra_data"
+    fi
+
+    if [ -z "$CASSANDRA_COMMITLOG_DIR" ]; then
+        CASSANDRA_COMMITLOG_DIR="/storage/cassandra/commitlog"
+    else
+        CASSANDRA_COMMITLOG_DIR="$CASSANDRA_COMMITLOG_DIR/cassandra_commitlog"
+    fi
+
+    if [ -z "$CASSANDRA_CACHES_DIR" ]; then
+        CASSANDRA_CACHES_DIR="/storage/cassandra/saved_caches"
+    else
+        CASSANDRA_CACHES_DIR="$CASSANDRA_CACHES_DIR/cassandra_caches"
+    fi
+
+    echo "[INFO] Cassandra data dir: $CASSANDRA_DATA_DIR"
+    echo "[INFO] Cassandra commit log dir: $CASSANDRA_COMMITLOG_DIR"
+    echo "[INFO] Cassandra saved caches dir: $CASSANDRA_CACHES_DIR"
+
+    # NOTE(review): the quotes make this a single-element array, so the loop
+    # below runs once; it still works because the unquoted $directory
+    # expansion passes all three paths to mkdir/chown at once.
+    dirs=("$CASSANDRA_DATA_DIR $CASSANDRA_COMMITLOG_DIR $CASSANDRA_CACHES_DIR")
+
+	count=${#dirs[@]}
+
+	for (( i=0; i<=$(( $count -1 )); i++ ))
+    do
+        directory=${dirs[$i]}
+
+        mkdir -p $directory
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create directory: $directory"
+        fi
+
+        chown -R cassandra:cassandra $directory
+
+        if [ $? -ne 0 ]; then
+            terminate "Failed to assign cassandra:cassandra as an owner of directory $directory"
+        fi
+    done
+
+    DATA_DIR_SPEC="\n"
+
+    # Build a YAML-style list of data directories (one '- <dir>' entry each).
+    dirs=($CASSANDRA_DATA_DIR)
+
+	count=${#dirs[@]}
+
+	for (( i=0; i<=$(( $count -1 )); i++ ))
+    do
+        dataDir=${dirs[$i]}
+        DATA_DIR_SPEC="${DATA_DIR_SPEC}     - ${dataDir}\n"
+    done
+
+    # Escape '/' so the values can be injected with sed - presumably into
+    # cassandra.yaml by the cassandra start script; confirm against caller.
+    CASSANDRA_DATA_DIR=$(echo $DATA_DIR_SPEC | sed -r "s/\//\\\\\//g")
+    CASSANDRA_COMMITLOG_DIR=$(echo $CASSANDRA_COMMITLOG_DIR | sed -r "s/\//\\\\\//g")
+    CASSANDRA_CACHES_DIR=$(echo $CASSANDRA_CACHES_DIR | sed -r "s/\//\\\\\//g")
+}
+
+# Attaches environment configuration settings
+. $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/env.sh
+
+# Validates environment settings
+validate
+
+# Validates node type of EC2 instance ($1 is supplied by the bootstrap
+# script that sources this file)
+if [ "$1" != "cassandra" ] && [ "$1" != "ignite" ] && [ "$1" != "test" ] && [ "$1" != "ganglia" ]; then
+    echo "[ERROR] Unsupported node type specified: $1"
+    exit 1
+fi
+
+# Sets node type of EC2 instance
+export NODE_TYPE=$1
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/env.sh b/modules/cassandra/store/src/test/bootstrap/aws/env.sh
new file mode 100644
index 0000000..031c5c3
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/env.sh
@@ -0,0 +1,113 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# This file specifies environment specific settings to bootstrap required infrastructure for:
+# -----------------------------------------------------------------------------------------------
+#
+#   1) Cassandra cluster
+#   2) Ignite cluster
+#   3) Tests cluster
+#   4) Ganglia agents to be installed on each clusters machine
+#   5) Ganglia master to collect metrics from agent and show graphs on Ganglia Web dashboard
+#
+# -----------------------------------------------------------------------------------------------
+
+# EC2 tagging related settings
+export EC2_OWNER_TAG=ignite@apache.org
+export EC2_PROJECT_TAG=ignite
+export EC2_CASSANDRA_TAG=CASSANDRA
+export EC2_IGNITE_TAG=IGNITE
+export EC2_TEST_TAG=TEST
+export EC2_GANGLIA_TAG=GANGLIA
+
+# Tests summary settings
+export CASSANDRA_NODES_COUNT=3
+export IGNITE_NODES_COUNT=3
+export TEST_NODES_COUNT=2
+export TESTS_TYPE="ignite"
+
+# Time (in minutes) to wait for Cassandra/Ignite node up and running and register it in S3
+export SERVICE_STARTUP_TIME=10
+
+# Number of attempts to start Cassandra/Ignite daemon
+export SERVICE_START_ATTEMPTS=3
+
+# Root S3 folder. NOTE: replace the <bucket>/<folder> placeholders with a
+# real S3 location before using these scripts.
+export S3_ROOT=s3://<bucket>/<folder>
+
+# S3 folder for downloads. You should put the ignite load tests jar archive here
+# (you can also place other required artifacts here, like Cassandra, Ignite and so on)
+export S3_DOWNLOADS=$S3_ROOT/test
+
+# S3 root system folder where to store all infrastructure info
+export S3_SYSTEM=$S3_ROOT/test1
+
+# S3 system folders to store cluster specific info
+export S3_CASSANDRA_SYSTEM=$S3_SYSTEM/cassandra
+export S3_IGNITE_SYSTEM=$S3_SYSTEM/ignite
+export S3_TESTS_SYSTEM=$S3_SYSTEM/tests
+export S3_GANGLIA_SYSTEM=$S3_SYSTEM/ganglia
+
+# Logs related settings
+export S3_LOGS_TRIGGER=$S3_SYSTEM/logs-trigger
+export S3_LOGS_ROOT=$S3_SYSTEM/logs
+export S3_CASSANDRA_LOGS=$S3_LOGS_ROOT/cassandra
+export S3_IGNITE_LOGS=$S3_LOGS_ROOT/ignite
+export S3_TESTS_LOGS=$S3_LOGS_ROOT/tests
+export S3_GANGLIA_LOGS=$S3_LOGS_ROOT/ganglia
+
+# Cassandra related settings
+export CASSANDRA_DOWNLOAD_URL=http://archive.apache.org/dist/cassandra/3.5/apache-cassandra-3.5-bin.tar.gz
+export S3_CASSANDRA_BOOTSTRAP_SUCCESS=$S3_CASSANDRA_SYSTEM/success
+export S3_CASSANDRA_BOOTSTRAP_FAILURE=$S3_CASSANDRA_SYSTEM/failure
+export S3_CASSANDRA_NODES_DISCOVERY=$S3_CASSANDRA_SYSTEM/discovery
+export S3_CASSANDRA_FIRST_NODE_LOCK=$S3_CASSANDRA_SYSTEM/first-node-lock
+export S3_CASSANDRA_NODES_JOIN_LOCK=$S3_CASSANDRA_SYSTEM/join-lock
+
+# Ignite related settings
+export IGNITE_DOWNLOAD_URL=$S3_DOWNLOADS/apache-ignite-fabric-1.8.0-SNAPSHOT-bin.zip
+export S3_IGNITE_BOOTSTRAP_SUCCESS=$S3_IGNITE_SYSTEM/success
+export S3_IGNITE_BOOTSTRAP_FAILURE=$S3_IGNITE_SYSTEM/failure
+export S3_IGNITE_NODES_DISCOVERY=$S3_IGNITE_SYSTEM/discovery
+export S3_IGNITE_FIRST_NODE_LOCK=$S3_IGNITE_SYSTEM/first-node-lock
+export S3_IGNITE_NODES_JOIN_LOCK=$S3_IGNITE_SYSTEM/i-join-lock
+
+# Tests related settings
+# NOTE(review): "DONLOAD" (sic) is the established variable name used across
+# the bootstrap scripts - rename only together with all of its consumers.
+export TESTS_PACKAGE_DONLOAD_URL=$S3_DOWNLOADS/ignite-cassandra-tests-1.8.0-SNAPSHOT.zip
+export S3_TESTS_TRIGGER=$S3_SYSTEM/tests-trigger
+export S3_TESTS_NODES_DISCOVERY=$S3_TESTS_SYSTEM/discovery
+export S3_TESTS_SUCCESS=$S3_TESTS_SYSTEM/success
+export S3_TESTS_FAILURE=$S3_TESTS_SYSTEM/failure
+export S3_TESTS_IDLE=$S3_TESTS_SYSTEM/idle
+export S3_TESTS_PREPARING=$S3_TESTS_SYSTEM/preparing
+export S3_TESTS_WAITING=$S3_TESTS_SYSTEM/waiting
+export S3_TESTS_RUNNING=$S3_TESTS_SYSTEM/running
+export S3_TESTS_FIRST_NODE_LOCK=$S3_TESTS_SYSTEM/first-node-lock
+export S3_TESTS_SUMMARY=$S3_SYSTEM/t-summary.zip
+
+# Ganglia related settings
+export GANGLIA_CORE_DOWNLOAD_URL=https://github.com/ganglia/monitor-core.git
+export GANGLIA_WEB_DOWNLOAD_URL=https://github.com/ganglia/ganglia-web.git
+export RRD_DOWNLOAD_URL=http://oss.oetiker.ch/rrdtool/pub/rrdtool-1.3.1.tar.gz
+export GPERF_DOWNLOAD_URL=http://ftp.gnu.org/gnu/gperf/gperf-3.0.3.tar.gz
+export EPEL_DOWNLOAD_URL=https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+export S3_GANGLIA_BOOTSTRAP_SUCCESS=$S3_GANGLIA_SYSTEM/success
+export S3_GANGLIA_BOOTSTRAP_FAILURE=$S3_GANGLIA_SYSTEM/failure
+export S3_GANGLIA_MASTER_DISCOVERY=$S3_GANGLIA_SYSTEM/discovery
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ganglia/agent-start.sh b/modules/cassandra/store/src/test/bootstrap/aws/ganglia/agent-start.sh
new file mode 100644
index 0000000..8e49c18
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/ganglia/agent-start.sh
@@ -0,0 +1,75 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Script to start Ganglia agent on EC2 node (used by agent-bootstrap.sh)
+# -----------------------------------------------------------------------------------------------
+
+. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "ganglia"
+
+echo "[INFO] Running Ganglia agent discovery daemon for '$1' cluster using $2 port"
+
+# Waiting for the Ganglia master node up and running
+waitFirstClusterNodeRegistered
+
+DISCOVERY_URL=$(getDiscoveryUrl)
+
+masterNode=$(aws s3 ls $DISCOVERY_URL | head -1)
+masterNode=($masterNode)
+masterNode=${masterNode[3]}
+masterNode=$(echo $masterNode | xargs)
+
+if [ $? -ne 0 ] || [ -z "$masterNode" ]; then
+    echo "[ERROR] Failed to get Ganglia master node from: $DISCOVERY_URL"
+fi
+
+echo "[INFO] Got Ganglia master node: $masterNode"
+
+echo "[INFO] Creating gmond config file"
+
+/usr/local/sbin/gmond --default_config > /opt/gmond-default.conf
+
+cat /opt/gmond-default.conf | sed -r "s/deaf = no/deaf = yes/g" | \
+sed -r "s/name = \"unspecified\"/name = \"$1\"/g" | \
+sed -r "s/#bind_hostname/bind_hostname/g" | \
+sed "0,/mcast_join = 239.2.11.71/s/mcast_join = 239.2.11.71/host = $masterNode/g" | \
+sed -r "s/mcast_join = 239.2.11.71//g" | sed -r "s/bind = 239.2.11.71//g" | \
+sed -r "s/port = 8649/port = $2/g" | sed -r "s/retry_bind = true//g" > /opt/gmond.conf
+
+echo "[INFO] Running gmond daemon to report to gmetad on $masterNode"
+
+/usr/local/sbin/gmond --conf=/opt/gmond.conf -p /opt/gmond.pid
+
+sleep 2s
+
+if [ ! -f "/opt/gmond.pid" ]; then
+    echo "[ERROR] Failed to start gmond daemon, pid file doesn't exist"
+    exit 1
+fi
+
+pid=$(cat /opt/gmond.pid)
+
+echo "[INFO] gmond daemon started, pid=$pid"
+
+exists=$(ps $pid | grep gmond)
+
+if [ -z "$exists" ]; then
+    echo "[ERROR] gmond daemon abnormally terminated"
+    exit 1
+fi
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ganglia/ganglia-bootstrap.sh b/modules/cassandra/store/src/test/bootstrap/aws/ganglia/ganglia-bootstrap.sh
new file mode 100644
index 0000000..15fa044
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/ganglia/ganglia-bootstrap.sh
@@ -0,0 +1,417 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Bootstrap script to spin up Ganglia master
+# -----------------------------------------------------------------------------------------------
+
+# URL to download AWS CLI tools
+AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
+
+# URL to download JDK
+JDK_DOWNLOAD_URL=http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz
+
+# URL to download Ignite-Cassandra tests package - you should previously package and upload it to this place
+TESTS_PACKAGE_DONLOAD_URL=s3://<bucket>/<folder>/ignite-cassandra-tests-<version>.zip
+
+# Terminates script execution and upload logs to S3
+terminate()
+{
+    # Reports the bootstrap result to S3 and exits.
+    # $1 - optional error message: when present a failure marker (__error__)
+    # is uploaded and the script exits 1; otherwise a success marker
+    # (__success__) is uploaded and the script exits 0.
+    SUCCESS_URL=$S3_GANGLIA_BOOTSTRAP_SUCCESS
+    FAILURE_URL=$S3_GANGLIA_BOOTSTRAP_FAILURE
+
+    # Normalize both URLs to end with '/'.
+    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
+        SUCCESS_URL=${SUCCESS_URL}/
+    fi
+
+    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
+        FAILURE_URL=${FAILURE_URL}/
+    fi
+
+    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
+    msg=$host_name
+
+    if [ -n "$1" ]; then
+        echo "[ERROR] $1"
+        echo "[ERROR]-----------------------------------------------------"
+        echo "[ERROR] Ganglia master node bootstrap failed"
+        echo "[ERROR]-----------------------------------------------------"
+        msg=$1
+
+        if [ -z "$FAILURE_URL" ]; then
+            exit 1
+        fi
+
+        reportFolder=${FAILURE_URL}${host_name}
+        reportFile=$reportFolder/__error__
+    else
+        echo "[INFO]-----------------------------------------------------"
+        echo "[INFO] Ganglia master node bootstrap successfully completed"
+        echo "[INFO]-----------------------------------------------------"
+
+        if [ -z "$SUCCESS_URL" ]; then
+            exit 0
+        fi
+
+        reportFolder=${SUCCESS_URL}${host_name}
+        reportFile=$reportFolder/__success__
+    fi
+
+    echo $msg > /opt/bootstrap-result
+
+    # Best effort: failures to upload the report are logged but not fatal.
+    aws s3 rm --recursive $reportFolder
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to drop report folder: $reportFolder"
+    fi
+
+    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
+    fi
+
+    rm -f /opt/bootstrap-result
+
+    if [ -n "$1" ]; then
+        exit 1
+    fi
+
+    exit 0
+}
+
+# Downloads specified package
+downloadPackage()
+{
+    # Downloads $1 into local path $2 ($3 is a human-readable package name
+    # used in log messages). S3 URLs go through the AWS CLI, everything
+    # else through curl. Retries up to 10 times with a 5 sec pause and
+    # terminates the bootstrap when all attempts fail.
+    echo "[INFO] Downloading $3 package from $1 into $2"
+
+    # BUGFIX: 'for i in 0 9' iterated only twice (i=0 and i=9) although the
+    # failure message below promises 10 attempts.
+    for i in 1 2 3 4 5 6 7 8 9 10;
+    do
+        if [[ "$1" == s3* ]]; then
+            aws s3 cp $1 $2
+            code=$?
+        else
+            curl "$1" -o "$2"
+            code=$?
+        fi
+
+        if [ $code -eq 0 ]; then
+            echo "[INFO] $3 package successfully downloaded from $1 into $2"
+            return 0
+        fi
+
+        echo "[WARN] Failed to download $3 package from $i attempt, sleeping extra 5sec"
+        sleep 5s
+    done
+
+    terminate "All 10 attempts to download $3 package from $1 are failed"
+}
+
+# Downloads and setup JDK
+setupJava()
+{
+    # Downloads the Oracle JDK tarball and unpacks it to /opt/java.
+    rm -Rf /opt/java /opt/jdk.tar.gz
+
+    echo "[INFO] Downloading 'jdk'"
+    # The cookie header auto-accepts the Oracle download license prompt.
+    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$JDK_DOWNLOAD_URL" -O /opt/jdk.tar.gz
+    if [ $? -ne 0 ]; then
+        terminate "Failed to download 'jdk'"
+    fi
+
+    echo "[INFO] Untaring 'jdk'"
+    tar -xvzf /opt/jdk.tar.gz -C /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to untar 'jdk'"
+    fi
+
+    rm -Rf /opt/jdk.tar.gz
+
+    # Normalize the versioned unpack dir (e.g. jdk1.8.0_77) to /opt/java.
+    unzipDir=$(ls /opt | grep "jdk")
+    if [ "$unzipDir" != "java" ]; then
+        mv /opt/$unzipDir /opt/java
+    fi
+}
+
+# Downloads and setup AWS CLI
+setupAWSCLI()
+{
+    # Installs the AWS CLI: tries pip first and falls back to the bundled
+    # zip installer when pip fails.
+    echo "[INFO] Installing 'awscli'"
+    pip install --upgrade awscli
+    if [ $? -eq 0 ]; then
+        return 0
+    fi
+
+    echo "[ERROR] Failed to install 'awscli' using pip"
+    echo "[INFO] Trying to install awscli using zip archive"
+    echo "[INFO] Downloading awscli zip"
+
+    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
+
+    echo "[INFO] Unzipping awscli zip"
+    unzip /opt/awscli-bundle.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip awscli zip"
+    fi
+
+    rm -Rf /opt/awscli-bundle.zip
+
+    echo "[INFO] Installing awscli"
+    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install awscli"
+    fi
+
+    echo "[INFO] Successfully installed awscli from zip archive"
+}
+
+# Setup all the pre-requisites (packages, settings and etc.)
+setupPreRequisites()
+{
+    # Installs the base packages (wget, net-tools, python, unzip) and pip
+    # that the rest of the bootstrap depends on.
+    echo "[INFO] Installing 'wget' package"
+    yum -y install wget
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'wget' package"
+    fi
+
+    echo "[INFO] Installing 'net-tools' package"
+    yum -y install net-tools
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'net-tools' package"
+    fi
+
+    echo "[INFO] Installing 'python' package"
+    yum -y install python
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'python' package"
+    fi
+
+    echo "[INFO] Installing 'unzip' package"
+    yum -y install unzip
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'unzip' package"
+    fi
+
+    downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
+
+    echo "[INFO] Installing 'pip'"
+    python /opt/get-pip.py
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'pip'"
+    fi
+}
+
+# Downloads and setup tests package
+setupTestsPackage()
+{
+    # Downloads and unpacks the tests package to /opt/ignite-cassandra-tests,
+    # sources common.sh as a "ganglia" node, syncs clocks via setupNTP,
+    # starts the background logs collector and tags this EC2 instance.
+    downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/ignite-cassandra-tests.zip" "Tests"
+
+    rm -Rf /opt/ignite-cassandra-tests
+
+    unzip /opt/ignite-cassandra-tests.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip tests package"
+    fi
+
+    rm -f /opt/ignite-cassandra-tests.zip
+
+    # Normalize the versioned unpack dir to /opt/ignite-cassandra-tests.
+    unzipDir=$(ls /opt | grep "ignite-cassandra")
+    if [ "$unzipDir" != "ignite-cassandra-tests" ]; then
+        mv /opt/$unzipDir /opt/ignite-cassandra-tests
+    fi
+
+    find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
+
+    # Brings in env.sh settings and the shared helper functions
+    # (setupNTP, printInstanceInfo, tagInstance, registerNode, ...).
+    . /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "ganglia"
+
+    setupNTP
+
+    echo "[INFO] Starting logs collector daemon"
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh "$S3_LOGS_TRIGGER" "$S3_GANGLIA_LOGS/$HOST_NAME" "/var/log/httpd" > /opt/logs-collector.log &
+
+    echo "[INFO] Logs collector daemon started: $!"
+
+    echo "----------------------------------------------------------------------------------------"
+    printInstanceInfo
+    echo "----------------------------------------------------------------------------------------"
+    tagInstance
+}
+
+# Creates config file for a 'gmond' daemon working in receiver mode
+createGmondReceiverConfig()
+{
+    # $1 - cluster name, $2 - UDP port.
+    # Produces /opt/gmond-$1.conf for a receive-only gmond: 'mute = yes'
+    # stops it sending its own metrics, and the default multicast settings
+    # are rewritten to a unicast channel on this host and port $2.
+    /usr/local/sbin/gmond --default_config > /opt/gmond-default.conf
+    if [ $? -ne 0 ]; then
+        terminate "Failed to create gmond default config in: /opt/gmond-default.txt"
+    fi
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    cat /opt/gmond-default.conf | sed -r "s/mute = no/mute = yes/g" | \
+    sed -r "s/name = \"unspecified\"/name = \"$1\"/g" | \
+    sed -r "s/#bind_hostname/bind_hostname/g" | \
+    sed "0,/mcast_join = 239.2.11.71/s/mcast_join = 239.2.11.71/host = $HOST_NAME/g" | \
+    sed -r "s/mcast_join = 239.2.11.71//g" | sed -r "s/bind = 239.2.11.71//g" | \
+    sed -r "s/port = 8649/port = $2/g" | sed -r "s/retry_bind = true//g" > /opt/gmond-${1}.conf
+
+    chmod a+r /opt/gmond-${1}.conf
+
+    rm -f /opt/gmond-default.conf
+}
+
+# Creates config file for a 'gmond' daemon working in sender-receiver mode
+createGmondSenderReceiverConfig()
+{
+    # $1 - cluster name, $2 - UDP port.
+    # Like createGmondReceiverConfig but without 'mute = yes', so this gmond
+    # both sends its own metrics and receives others'. Currently only
+    # referenced by the commented-out 'ganglia' cluster setup.
+    /usr/local/sbin/gmond --default_config > /opt/gmond-default.conf
+    if [ $? -ne 0 ]; then
+        terminate "Failed to create gmond default config in: /opt/gmond-default.txt"
+    fi
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    cat /opt/gmond-default.conf | sed -r "s/name = \"unspecified\"/name = \"$1\"/g" | \
+    sed -r "s/#bind_hostname/bind_hostname/g" | \
+    sed "0,/mcast_join = 239.2.11.71/s/mcast_join = 239.2.11.71/host = $HOST_NAME/g" | \
+    sed -r "s/mcast_join = 239.2.11.71//g" | sed -r "s/bind = 239.2.11.71//g" | \
+    sed -r "s/port = 8649/port = $2/g" | sed -r "s/retry_bind = true//g" > /opt/gmond-${1}.conf
+
+    chmod a+r /opt/gmond-${1}.conf
+
+    rm -f /opt/gmond-default.conf
+}
+
+# Downloads and setup Ganglia (and dependency) packages
+setupGangliaPackages()
+{
+    # Installs Ganglia in master mode and writes /opt/gmetad.conf with one
+    # data source per monitored cluster; the ports must stay in sync with
+    # the createGmondReceiverConfig calls below.
+    installGangliaPackages "master"
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    echo "data_source \"cassandra\" ${HOST_NAME}:8641" > /opt/gmetad.conf
+    echo "data_source \"ignite\" ${HOST_NAME}:8642" >> /opt/gmetad.conf
+    echo "data_source \"test\" ${HOST_NAME}:8643" >> /opt/gmetad.conf
+    #echo "data_source \"ganglia\" ${HOST_NAME}:8644" >> /opt/gmetad.conf
+    echo "setuid_username \"nobody\"" >> /opt/gmetad.conf
+    echo "case_sensitive_hostnames 0" >> /opt/gmetad.conf
+
+    chmod a+r /opt/gmetad.conf
+
+    createGmondReceiverConfig cassandra 8641
+    createGmondReceiverConfig ignite 8642
+    createGmondReceiverConfig test 8643
+    #createGmondSenderReceiverConfig ganglia 8644
+}
+
+# Starts a 'gmond' receiver daemon
+startGmondReceiver()
+{
+    # $1 - cluster name. Starts gmond with the /opt/gmond-$1.conf created by
+    # createGmondReceiverConfig and verifies via the pid file and process
+    # table that the daemon actually survived startup.
+    configFile=/opt/gmond-${1}.conf
+    pidFile=/opt/gmond-${1}.pid
+
+    echo "[INFO] Starting gmond receiver daemon for $1 cluster using config file: $configFile"
+
+    rm -f $pidFile
+
+    /usr/local/sbin/gmond --conf=$configFile --pid-file=$pidFile
+
+    sleep 2s
+
+    if [ ! -f "$pidFile" ]; then
+        terminate "Failed to start gmond daemon for $1 cluster, pid file doesn't exist"
+    fi
+
+    pid=$(cat $pidFile)
+
+    echo "[INFO] gmond daemon for $1 cluster started, pid=$pid"
+
+    exists=$(ps $pid | grep gmond)
+
+    if [ -z "$exists" ]; then
+        terminate "gmond daemon for $1 cluster abnormally terminated"
+    fi
+}
+
+# Starts 'gmetad' daemon
+startGmetadCollector()
+{
+    # Starts gmetad with /opt/gmetad.conf (written by setupGangliaPackages)
+    # and verifies via the pid file that the process survived startup.
+    echo "[INFO] Starting gmetad daemon"
+
+    rm -f /opt/gmetad.pid
+
+    /usr/local/sbin/gmetad --conf=/opt/gmetad.conf --pid-file=/opt/gmetad.pid
+
+    sleep 2s
+
+    if [ ! -f "/opt/gmetad.pid" ]; then
+        terminate "Failed to start gmetad daemon, pid file doesn't exist"
+    fi
+
+    pid=$(cat /opt/gmetad.pid)
+
+    echo "[INFO] gmetad daemon started, pid=$pid"
+
+    exists=$(ps $pid | grep gmetad)
+
+    if [ -z "$exists" ]; then
+        terminate "gmetad daemon abnormally terminated"
+    fi
+}
+
+# Starts Apache 'httpd' service
+startHttpdService()
+{
+    # Starts Apache httpd, which serves the Ganglia web dashboard
+    # (Alias /ganglia added to httpd.conf by installGangliaPackages).
+    echo "[INFO] Starting httpd service"
+
+    service httpd start
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to start httpd service"
+    fi
+
+    sleep 5s
+
+    exists=$(service httpd status | grep running)
+    if [ -z "$exists" ]; then
+        terminate "httpd service process terminated"
+    fi
+
+    echo "[INFO] httpd service successfully started"
+}
+
+###################################################################################################################
+
+echo "[INFO]-----------------------------------------------------------------"
+echo "[INFO] Bootstrapping Ganglia master server"
+echo "[INFO]-----------------------------------------------------------------"
+
+setupPreRequisites
+setupJava
+setupAWSCLI
+setupTestsPackage
+setupGangliaPackages
+
+registerNode
+
+startGmondReceiver cassandra
+startGmondReceiver ignite
+startGmondReceiver test
+#startGmondReceiver ganglia
+startGmetadCollector
+startHttpdService
+
+terminate
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh
new file mode 100644
index 0000000..7f97ea1
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh
@@ -0,0 +1,336 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Bootstrap script to spin up Ignite cluster
+# -----------------------------------------------------------------------------------------------
+
+# URL to download AWS CLI tools
+AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
+
+# URL to download JDK
+JDK_DOWNLOAD_URL=http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz
+
+# URL to download Ignite-Cassandra tests package - you should previously package and upload it to this place
+TESTS_PACKAGE_DONLOAD_URL=s3://<bucket>/<folder>/ignite-cassandra-tests-<version>.zip
+
+# Terminates script execution and upload logs to S3
+terminate()
+{
+    SUCCESS_URL=$S3_IGNITE_BOOTSTRAP_SUCCESS
+    FAILURE_URL=$S3_IGNITE_BOOTSTRAP_FAILURE
+
+    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
+        SUCCESS_URL=${SUCCESS_URL}/
+    fi
+
+    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
+        FAILURE_URL=${FAILURE_URL}/
+    fi
+
+    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
+    msg=$host_name
+
+    if [ -n "$1" ]; then
+        echo "[ERROR] $1"
+        echo "[ERROR]-----------------------------------------------------"
+        echo "[ERROR] Ignite node bootstrap failed"
+        echo "[ERROR]-----------------------------------------------------"
+        msg=$1
+
+        if [ -z "$FAILURE_URL" ]; then
+            exit 1
+        fi
+
+        reportFolder=${FAILURE_URL}${host_name}
+        reportFile=$reportFolder/__error__
+    else
+        echo "[INFO]-----------------------------------------------------"
+        echo "[INFO] Ignite node bootstrap successfully completed"
+        echo "[INFO]-----------------------------------------------------"
+
+        if [ -z "$SUCCESS_URL" ]; then
+            exit 0
+        fi
+
+        reportFolder=${SUCCESS_URL}${host_name}
+        reportFile=$reportFolder/__success__
+    fi
+
+    echo $msg > /opt/bootstrap-result
+
+    aws s3 rm --recursive $reportFolder
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to drop report folder: $reportFolder"
+    fi
+
+    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
+    fi
+
+    rm -f /opt/bootstrap-result
+
+    if [ -n "$1" ]; then
+        exit 1
+    fi
+
+    exit 0
+}
+
+# Downloads specified package
+downloadPackage()
+{
+    echo "[INFO] Downloading $3 package from $1 into $2"
+
+    for i in 0 1 2 3 4 5 6 7 8 9;
+    do
+        if [[ "$1" == s3* ]]; then
+            aws s3 cp $1 $2
+            code=$?
+        else
+            curl "$1" -o "$2"
+            code=$?
+        fi
+
+        if [ $code -eq 0 ]; then
+            echo "[INFO] $3 package successfully downloaded from $1 into $2"
+            return 0
+        fi
+
+        echo "[WARN] Failed to download $3 package on attempt $i, sleeping extra 5sec"
+        sleep 5s
+    done
+
+    terminate "All 10 attempts to download $3 package from $1 failed"
+}
+
+# Downloads and setup JDK
+setupJava()
+{
+    rm -Rf /opt/java /opt/jdk.tar.gz
+
+    echo "[INFO] Downloading 'jdk'"
+    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$JDK_DOWNLOAD_URL" -O /opt/jdk.tar.gz
+    if [ $? -ne 0 ]; then
+        terminate "Failed to download 'jdk'"
+    fi
+
+    echo "[INFO] Untaring 'jdk'"
+    tar -xvzf /opt/jdk.tar.gz -C /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to untar 'jdk'"
+    fi
+
+    rm -Rf /opt/jdk.tar.gz
+
+    unzipDir=$(ls /opt | grep "jdk")
+    if [ "$unzipDir" != "java" ]; then
+        mv /opt/$unzipDir /opt/java
+    fi
+}
+
+# Downloads and setup AWS CLI
+setupAWSCLI()
+{
+    echo "[INFO] Installing 'awscli'"
+    pip install --upgrade awscli
+    if [ $? -eq 0 ]; then
+        return 0
+    fi
+
+    echo "[ERROR] Failed to install 'awscli' using pip"
+    echo "[INFO] Trying to install awscli using zip archive"
+    echo "[INFO] Downloading awscli zip"
+
+    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
+
+    echo "[INFO] Unzipping awscli zip"
+    unzip /opt/awscli-bundle.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip awscli zip"
+    fi
+
+    rm -Rf /opt/awscli-bundle.zip
+
+    echo "[INFO] Installing awscli"
+    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install awscli"
+    fi
+
+    echo "[INFO] Successfully installed awscli from zip archive"
+}
+
+# Sets up all the pre-requisites (packages, settings, etc.)
+setupPreRequisites()
+{
+    echo "[INFO] Installing 'wget' package"
+    yum -y install wget
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'wget' package"
+    fi
+
+    echo "[INFO] Installing 'net-tools' package"
+    yum -y install net-tools
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'net-tools' package"
+    fi
+
+    echo "[INFO] Installing 'python' package"
+    yum -y install python
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'python' package"
+    fi
+
+    echo "[INFO] Installing 'unzip' package"
+    yum -y install unzip
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'unzip' package"
+    fi
+
+    downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
+
+    echo "[INFO] Installing 'pip'"
+    python /opt/get-pip.py
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'pip'"
+    fi
+}
+
+# Downloads and setup tests package
+setupTestsPackage()
+{
+    downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/ignite-cassandra-tests.zip" "Tests"
+
+    rm -Rf /opt/ignite-cassandra-tests
+
+    unzip /opt/ignite-cassandra-tests.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip tests package"
+    fi
+
+    rm -f /opt/ignite-cassandra-tests.zip
+
+    unzipDir=$(ls /opt | grep "ignite-cassandra")
+    if [ "$unzipDir" != "ignite-cassandra-tests" ]; then
+        mv /opt/$unzipDir /opt/ignite-cassandra-tests
+    fi
+
+    find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
+
+    . /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "ignite"
+
+    setupNTP
+
+    echo "[INFO] Starting logs collector daemon"
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh "$S3_LOGS_TRIGGER" "$S3_IGNITE_LOGS/$HOST_NAME" "/opt/ignite/work/log" "/opt/ignite/ignite-start.log" > /opt/logs-collector.log &
+
+    echo "[INFO] Logs collector daemon started: $!"
+
+    echo "----------------------------------------------------------------------------------------"
+    printInstanceInfo
+    echo "----------------------------------------------------------------------------------------"
+    tagInstance
+    bootstrapGangliaAgent "ignite" 8642
+}
+
+# Downloads Ignite package
+downloadIgnite()
+{
+    downloadPackage "$IGNITE_DOWNLOAD_URL" "/opt/ignite.zip" "Ignite"
+
+    rm -Rf /opt/ignite
+
+    echo "[INFO] Unzipping Ignite package"
+    unzip /opt/ignite.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip Ignite package"
+    fi
+
+    rm -f /opt/ignite.zip
+
+    unzipDir=$(ls /opt | grep "ignite" | grep "apache")
+    if [ "$unzipDir" != "ignite" ]; then
+        mv /opt/$unzipDir /opt/ignite
+    fi
+}
+
+# Sets up Ignite
+setupIgnite()
+{
+    echo "[INFO] Creating 'ignite' group"
+    exists=$(cat /etc/group | grep ignite)
+    if [ -z "$exists" ]; then
+        groupadd ignite
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create 'ignite' group"
+        fi
+    fi
+
+    echo "[INFO] Creating 'ignite' user"
+    exists=$(cat /etc/passwd | grep ignite)
+    if [ -z "$exists" ]; then
+        useradd -g ignite ignite
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create 'ignite' user"
+        fi
+    fi
+
+    testsJar=$(find /opt/ignite-cassandra-tests -type f -name "*.jar" | grep ignite-cassandra- | grep tests.jar)
+    if [ -n "$testsJar" ]; then
+        echo "[INFO] Copying tests jar $testsJar into /opt/ignite/libs/optional/ignite-cassandra"
+        cp $testsJar /opt/ignite/libs/optional/ignite-cassandra
+        if [ $? -ne 0 ]; then
+            terminate "Failed to copy $testsJar into /opt/ignite/libs/optional/ignite-cassandra"
+        fi
+    fi
+
+    rm -f /opt/ignite/config/ignite-cassandra-server-template.xml
+    mv -f /opt/ignite-cassandra-tests/bootstrap/aws/ignite/ignite-cassandra-server-template.xml /opt/ignite/config
+
+    chown -R ignite:ignite /opt/ignite /opt/ignite-cassandra-tests
+
+    echo "export JAVA_HOME=/opt/java" >> $1
+    echo "export IGNITE_HOME=/opt/ignite" >> $1
+    echo "export USER_LIBS=\$IGNITE_HOME/libs/optional/ignite-cassandra/*:\$IGNITE_HOME/libs/optional/ignite-slf4j/*" >> $1
+    echo "export PATH=\$JAVA_HOME/bin:\$IGNITE_HOME/bin:\$PATH" >> $1
+}
+
+###################################################################################################################
+
+echo "[INFO]-----------------------------------------------------------------"
+echo "[INFO] Bootstrapping Ignite node"
+echo "[INFO]-----------------------------------------------------------------"
+
+setupPreRequisites
+setupJava
+setupAWSCLI
+setupTestsPackage
+
+downloadIgnite
+setupIgnite "/root/.bash_profile"
+
+cmd="/opt/ignite-cassandra-tests/bootstrap/aws/ignite/ignite-start.sh"
+
+#sudo -u ignite -g ignite sh -c "$cmd | tee /opt/ignite/ignite-start.log"
+
+$cmd | tee /opt/ignite/ignite-start.log
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml
similarity index 86%
rename from modules/cassandra/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml
rename to modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml
index f85dcd9..03b3346 100644
--- a/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml
+++ b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml
@@ -25,7 +25,11 @@
         http://www.springframework.org/schema/util/spring-util.xsd">
 
     <!-- Cassandra connection settings -->
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
+        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
+            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+        </constructor-arg>
+    </bean>
 
     <util:list id="contactPoints" value-type="java.lang.String">
         ${CASSANDRA_SEEDS}
@@ -108,15 +112,14 @@
 
         <property name="cacheConfiguration">
             <list>
-                <!-- Partitioned cache example configuration (Atomic mode). -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="atomicityMode" value="ATOMIC"/>
-                    <property name="backups" value="1"/>
-                </bean>
-
                 <!-- Configuring persistence for "cache1" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache1"/>
+                    <property name="startSize" value="1000000"/>
+                    <property name="cacheMode" value="PARTITIONED"/>
+                    <property name="backups" value="0"/>
+                    <property name="offHeapMaxMemory" value="0"/>
+                    <property name="swapEnabled" value="false"/>
                     <property name="readThrough" value="true"/>
                     <property name="writeThrough" value="true"/>
                     <property name="writeBehindEnabled" value="true"/>
@@ -131,6 +134,11 @@
                 <!-- Configuring persistence for "cache2" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache2"/>
+                    <property name="startSize" value="1000000"/>
+                    <property name="cacheMode" value="PARTITIONED"/>
+                    <property name="backups" value="0"/>
+                    <property name="offHeapMaxMemory" value="0"/>
+                    <property name="swapEnabled" value="false"/>
                     <property name="readThrough" value="true"/>
                     <property name="writeThrough" value="true"/>
                     <property name="writeBehindEnabled" value="true"/>
@@ -145,6 +153,11 @@
                 <!-- Configuring persistence for "cache3" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache3"/>
+                    <property name="startSize" value="1000000"/>
+                    <property name="cacheMode" value="PARTITIONED"/>
+                    <property name="backups" value="0"/>
+                    <property name="offHeapMaxMemory" value="0"/>
+                    <property name="swapEnabled" value="false"/>
                     <property name="readThrough" value="true"/>
                     <property name="writeThrough" value="true"/>
                     <property name="writeBehindEnabled" value="true"/>
diff --git a/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-env.sh b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-env.sh
similarity index 82%
rename from modules/cassandra/src/test/bootstrap/aws/ignite/ignite-env.sh
rename to modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-env.sh
index e0643b7..bfe33719 100644
--- a/modules/cassandra/src/test/bootstrap/aws/ignite/ignite-env.sh
+++ b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-env.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
@@ -17,6 +17,10 @@
 # limitations under the License.
 #
 
+# -----------------------------------------------------------------------------------------------
+# Environment setup script for Ignite
+# -----------------------------------------------------------------------------------------------
+
 JVM_OPTS="-Xms10g -Xmx10g -server -XX:+AggressiveOpts -XX:MaxMetaspaceSize=256m"
 JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseTLAB -XX:NewSize=128m -XX:MaxNewSize=768m"
 #JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=0 -XX:SurvivorRatio=1024 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=60"
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-start.sh b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-start.sh
new file mode 100644
index 0000000..f2c1557
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-start.sh
@@ -0,0 +1,266 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Script to start Ignite daemon (used by ignite-bootstrap.sh)
+# -----------------------------------------------------------------------------------------------
+
+#profile=/home/ignite/.bash_profile
+profile=/root/.bash_profile
+
+. $profile
+. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "ignite"
+
+# Sets up Cassandra seeds so that this Ignite node is able to connect to Cassandra.
+# Looks for the information in S3 about already up and running Cassandra cluster nodes.
+setupCassandraSeeds()
+{
+    setupClusterSeeds "cassandra" "true"
+
+    CLUSTER_SEEDS=($CLUSTER_SEEDS)
+	count=${#CLUSTER_SEEDS[@]}
+
+    CASSANDRA_SEEDS=
+
+	for (( i=0; i<=$(( $count -1 )); i++ ))
+	do
+		seed=${CLUSTER_SEEDS[$i]}
+        CASSANDRA_SEEDS="${CASSANDRA_SEEDS}<value>$seed<\/value>"
+	done
+
+    cat /opt/ignite/config/ignite-cassandra-server-template.xml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" > /opt/ignite/config/ignite-cassandra-server.xml
+}
+
+# Sets up Ignite seed nodes which this EC2 Ignite node will use to send its metadata and join the Ignite cluster
+setupIgniteSeeds()
+{
+    if [ "$FIRST_NODE_LOCK" == "true" ]; then
+        echo "[INFO] Setting up Ignite seeds"
+
+        CLUSTER_SEEDS="127.0.0.1:47500..47509"
+
+        echo "[INFO] Using localhost address as a seed for the first Ignite node: $CLUSTER_SEEDS"
+
+        aws s3 rm --recursive ${S3_IGNITE_NODES_DISCOVERY::-1}
+        if [ $? -ne 0 ]; then
+            terminate "Failed to clean Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
+        fi
+    else
+        setupClusterSeeds "ignite" "true"
+    fi
+
+    CLUSTER_SEEDS=($CLUSTER_SEEDS)
+	count=${#CLUSTER_SEEDS[@]}
+
+    IGNITE_SEEDS=
+
+	for (( i=0; i<=$(( $count -1 )); i++ ))
+	do
+		seed=${CLUSTER_SEEDS[$i]}
+        IGNITE_SEEDS="${IGNITE_SEEDS}<value>$seed<\/value>"
+	done
+
+    cat /opt/ignite/config/ignite-cassandra-server.xml | sed -r "s/\\\$\{IGNITE_SEEDS\}/$IGNITE_SEEDS/g" > /opt/ignite/config/ignite-cassandra-server1.xml
+    mv -f /opt/ignite/config/ignite-cassandra-server1.xml /opt/ignite/config/ignite-cassandra-server.xml
+}
+
+# Checks status of Ignite daemon
+checkIgniteStatus()
+{
+    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
+
+    nodeId=
+    nodeAddrs=
+    nodePorts=
+    topology=
+    metrics=
+
+    logFile=$(ls /opt/ignite/work/log/ | grep "\.log$")
+    if [ -n "$logFile" ]; then
+        logFile=/opt/ignite/work/log/$logFile
+        nodeId=$(cat $logFile | grep "Local node \[ID")
+        nodeAddrs=$(cat $logFile | grep "Local node addresses:")
+        nodePorts=$(cat $logFile | grep "Local ports:")
+        topology=$(cat $logFile | grep "Topology snapshot")
+        metrics=$(cat $logFile | grep "Metrics for local node" | head -n 1)
+    fi
+
+    if [ -n "$nodeId" ] && [ -n "$nodeAddrs" ] && [ -n "$nodePorts" ] && [ -n "$topology" ] && [ -n "$metrics" ] && [ -n "$proc" ]; then
+        sleep 30s
+        return 0
+    fi
+
+    return 1
+}
+
+# Gracefully starts Ignite daemon and waits until it joins Ignite cluster
+startIgnite()
+{
+    echo "[INFO]-------------------------------------------------------------"
+    echo "[INFO] Trying attempt $START_ATTEMPT to start Ignite daemon"
+    echo "[INFO]-------------------------------------------------------------"
+    echo ""
+
+    setupCassandraSeeds
+    setupIgniteSeeds
+
+    waitToJoinCluster
+
+    if [ "$FIRST_NODE_LOCK" == "true" ]; then
+        aws s3 rm --recursive ${S3_IGNITE_NODES_DISCOVERY::-1}
+        if [ $? -ne 0 ]; then
+            terminate "Failed to clean Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
+        fi
+    fi
+
+    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
+    proc=($proc)
+
+    if [ -n "${proc[1]}" ]; then
+        echo "[INFO] Terminating existing Ignite process ${proc[1]}"
+        kill -9 ${proc[1]}
+    fi
+
+    echo "[INFO] Starting Ignite"
+    rm -Rf /opt/ignite/work/*
+    /opt/ignite/bin/ignite.sh /opt/ignite/config/ignite-cassandra-server.xml &
+
+    echo "[INFO] Ignite job id: $!"
+
+    sleep 1m
+
+    START_ATTEMPT=$(( $START_ATTEMPT+1 ))
+}
+
+#######################################################################################################
+
+START_ATTEMPT=0
+
+# Cleans all the previous metadata about this EC2 node
+unregisterNode
+
+# Tries to get first-node lock
+tryToGetFirstNodeLock
+
+echo "[INFO]-----------------------------------------------------------------"
+
+if [ "$FIRST_NODE_LOCK" == "true" ]; then
+    echo "[INFO] Starting first Ignite node"
+else
+    echo "[INFO] Starting Ignite node"
+fi
+
+echo "[INFO]-----------------------------------------------------------------"
+printInstanceInfo
+echo "[INFO]-----------------------------------------------------------------"
+
+if [ "$FIRST_NODE_LOCK" != "true" ]; then
+    waitFirstClusterNodeRegistered "true"
+else
+    cleanupMetadata
+fi
+
+# Applies Ignite environment settings from ignite-env.sh
+envScript=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/ignite-env.sh)
+if [ -f "$envScript" ]; then
+    . $envScript
+fi
+
+# Start Ignite daemon
+startIgnite
+
+startTime=$(date +%s)
+
+# Trying multiple attempts to start Ignite daemon
+while true; do
+    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
+
+    checkIgniteStatus
+
+    if [ $? -eq 0 ]; then
+        sleep 1m
+        echo "[INFO]-----------------------------------------------------"
+        echo "[INFO] Ignite daemon successfully started"
+        echo "[INFO]-----------------------------------------------------"
+        echo $proc
+        echo "[INFO]-----------------------------------------------------"
+
+        # Once node joined the cluster we need to remove cluster-join lock
+        # to allow other EC2 nodes to acquire it and join cluster sequentially
+        removeClusterJoinLock
+
+        break
+    fi
+
+    currentTime=$(date +%s)
+    duration=$(( $currentTime-$startTime ))
+    duration=$(( $duration/60 ))
+
+    if [ $duration -gt $SERVICE_STARTUP_TIME ]; then
+        if [ "$FIRST_NODE_LOCK" == "true" ]; then
+            # If the first node of Ignite cluster failed to start Ignite daemon in SERVICE_STARTUP_TIME min,
+            # we will not try any other attempts and just terminate with error. Terminate function itself, will
+            # take care about removing all the locks holding by this node.
+            terminate "${SERVICE_STARTUP_TIME}min timeout expired, but first Ignite daemon is still not up and running"
+        else
+            # If node isn't the first node of Ignite cluster and it failed to start we need to
+            # remove cluster-join lock to allow other EC2 nodes to acquire it
+            removeClusterJoinLock
+
+            # If node failed all SERVICE_START_ATTEMPTS attempts to start Ignite daemon we will not
+            # try anymore and terminate with error
+            if [ $START_ATTEMPT -gt $SERVICE_START_ATTEMPTS ]; then
+                terminate "${SERVICE_START_ATTEMPTS} attempts exceeded, but Ignite daemon is still not up and running"
+            fi
+
+            # New attempt to start Ignite daemon
+            startIgnite
+        fi
+
+        continue
+    fi
+
+    # Handling situation when Ignite daemon process abnormally terminated
+    if [ -z "$proc" ]; then
+        # If this is the first node of Ignite cluster just terminating with error
+        if [ "$FIRST_NODE_LOCK" == "true" ]; then
+            terminate "Failed to start Ignite daemon"
+        fi
+
+        # Remove cluster-join lock to allow other EC2 nodes to acquire it
+        removeClusterJoinLock
+
+        echo "[WARN] Failed to start Ignite daemon. Sleeping for extra 30sec"
+        sleep 30s
+
+        # New attempt to start Ignite daemon
+        startIgnite
+
+        continue
+    fi
+
+    echo "[INFO] Waiting for Ignite daemon to start, time passed ${duration}min"
+    sleep 30s
+done
+
+# Once the Ignite daemon has successfully started we register the new Ignite node in S3
+registerNode
+
+# Terminating script with zero exit code
+terminate
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/logs-collector.sh b/modules/cassandra/store/src/test/bootstrap/aws/logs-collector.sh
new file mode 100644
index 0000000..1634b89
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/logs-collector.sh
@@ -0,0 +1,173 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Logs collector daemon
+# -----------------------------------------------------------------------------------------------
+# Script is launched in background by all EC2 nodes of all clusters (Cassandra, Ignite, Tests) and
+# periodically (each 30 seconds) checks if specific S3 trigger file (specified by $S3_LOGS_TRIGGER_URL)
+# was created or its timestamp was changed. Such an event serves as a trigger for the script
+# to collect EC2 instance logs (from folder specified by $1) and upload them into specific
+# S3 folder (specified by $S3_LOGS_FOLDER).
+# -----------------------------------------------------------------------------------------------
+
+uploadLogs()
+{
+    if [ ! -d "$1" ]; then
+        echo "[INFO] Logs directory doesn't exist: $1"
+        return 0
+    fi
+
+    echo "[INFO] Uploading logs from directory: $1"
+
+    dirList=$(ls $1 | head -1)
+
+    if [ -z "$dirList" ]; then
+        echo "[INFO] Directory is empty: $1"
+    fi
+
+    for i in 0 1 2 3 4 5 6 7 8 9;
+    do
+        aws s3 sync --sse AES256 --delete "$1" "$S3_LOGS_FOLDER"
+        code=$?
+
+        if [ $code -eq 0 ]; then
+            echo "[INFO] Successfully uploaded logs from directory: $1"
+            return 0
+        fi
+
+        echo "[WARN] Failed to upload logs on attempt $i, sleeping extra 30sec"
+        sleep 30s
+    done
+
+    echo "[ERROR] All 10 attempts to upload logs failed for the directory: $1"
+}
+
+createNewLogsSnapshot()
+{
+    rm -f ~/logs-collector.snapshot.new
+
+    for log_src in "$@"
+    do
+        if [ -d "$log_src" ] || [ -f "$log_src" ]; then
+            ls -alR $log_src >> ~/logs-collector.snapshot.new
+
+        fi
+    done
+}
+
+checkLogsChanged()
+{
+    createNewLogsSnapshot $@
+
+    if [ ! -f "$HOME/logs-collector.snapshot" ]; then
+        return 1
+    fi
+
+    diff "$HOME/logs-collector.snapshot" "$HOME/logs-collector.snapshot.new" > /dev/null
+
+    return $?
+}
+
+updateLogsSnapshot()
+{
+    if [ ! -f "$HOME/logs-collector.snapshot.new" ]; then
+        return 0
+    fi
+
+    rm -f "$HOME/logs-collector.snapshot"
+    mv "$HOME/logs-collector.snapshot.new" "$HOME/logs-collector.snapshot"
+}
+
+collectLogs()
+{
+    createNewLogsSnapshot
+
+    rm -Rf ~/logs-collector-logs
+    mkdir -p ~/logs-collector-logs
+
+    for log_src in "$@"
+    do
+        if [ -f "$log_src" ]; then
+            echo "[INFO] Collecting log file: $log_src"
+            cp -f $log_src ~/logs-collector-logs
+        elif [ -d "$log_src" ]; then
+            echo "[INFO] Collecting logs from folder: $log_src"
+            cp -Rf $log_src ~/logs-collector-logs
+        fi
+    done
+
+    uploadLogs ~/logs-collector-logs
+
+    rm -Rf ~/logs-collector-logs
+
+    updateLogsSnapshot
+}
+
+echo "[INFO] Running Logs collector service"
+
+if [ -z "$1" ]; then
+    echo "[ERROR] Logs collection S3 trigger URL wasn't specified"
+    exit 1
+fi
+
+S3_LOGS_TRIGGER_URL=$1
+
+echo "[INFO] Logs collection S3 trigger URL: $S3_LOGS_TRIGGER_URL"
+
+if [ -z "$2" ]; then
+    echo "[ERROR] S3 folder to upload logs to wasn't specified"
+    exit 1
+fi
+
+S3_LOGS_FOLDER=$2
+
+echo "[INFO] S3 logs upload folder: $S3_LOGS_FOLDER"
+
+shift 2
+
+if [ -z "$1" ]; then
+    echo "[WARN] No local logs sources specified"
+else
+    echo "[INFO] Local logs sources: $@"
+fi
+
+echo "--------------------------------------------------------------------"
+
+TRIGGER_STATE=
+
+while true; do
+    sleep 30s
+
+    STATE=$(aws s3 ls $S3_LOGS_TRIGGER_URL)
+
+    if [ -z "$STATE" ] || [ "$STATE" == "$TRIGGER_STATE" ]; then
+        checkLogsChanged
+
+        if [ $? -eq 0 ]; then
+            continue
+        fi
+    fi
+
+    TRIGGER_STATE=$STATE
+
+    collectLogs $@ /var/log/cloud-init.log /var/log/cloud-init-output.log
+
+    echo "--------------------------------------------------------------------"
+done
diff --git a/modules/cassandra/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml b/modules/cassandra/store/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml
similarity index 86%
rename from modules/cassandra/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml
rename to modules/cassandra/store/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml
index 53c33a2..77ffb9e 100644
--- a/modules/cassandra/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml
+++ b/modules/cassandra/store/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml
@@ -24,7 +24,11 @@
         http://www.springframework.org/schema/util
         http://www.springframework.org/schema/util/spring-util.xsd">
 
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
+        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
+            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+        </constructor-arg>
+    </bean>
 
     <util:list id="contactPoints" value-type="java.lang.String">
         ${CASSANDRA_SEEDS}
@@ -114,6 +118,11 @@
                 <!-- Configuring persistence for "cache1" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache1"/>
+                    <property name="startSize" value="1000000"/>
+                    <property name="cacheMode" value="PARTITIONED"/>
+                    <property name="backups" value="0"/>
+                    <property name="offHeapMaxMemory" value="0"/>
+                    <property name="swapEnabled" value="false"/>
                     <property name="readThrough" value="true"/>
                     <property name="writeThrough" value="true"/>
                     <property name="writeBehindEnabled" value="true"/>
@@ -128,6 +137,11 @@
                 <!-- Configuring persistence for "cache2" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache2"/>
+                    <property name="startSize" value="1000000"/>
+                    <property name="cacheMode" value="PARTITIONED"/>
+                    <property name="backups" value="0"/>
+                    <property name="offHeapMaxMemory" value="0"/>
+                    <property name="swapEnabled" value="false"/>
                     <property name="readThrough" value="true"/>
                     <property name="writeThrough" value="true"/>
                     <property name="writeBehindEnabled" value="true"/>
@@ -142,6 +156,11 @@
                 <!-- Configuring persistence for "cache3" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache3"/>
+                    <property name="startSize" value="1000000"/>
+                    <property name="cacheMode" value="PARTITIONED"/>
+                    <property name="backups" value="0"/>
+                    <property name="offHeapMaxMemory" value="0"/>
+                    <property name="swapEnabled" value="false"/>
                     <property name="readThrough" value="true"/>
                     <property name="writeThrough" value="true"/>
                     <property name="writeBehindEnabled" value="true"/>
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-bootstrap.sh b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-bootstrap.sh
new file mode 100644
index 0000000..8e6faff
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-bootstrap.sh
@@ -0,0 +1,317 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Bootstrap script to spin up Tests cluster
+# -----------------------------------------------------------------------------------------------
+
+# URL to download AWS CLI tools
+AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
+
+# URL to download JDK
+JDK_DOWNLOAD_URL=http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz
+
+# URL to download Ignite-Cassandra tests package - you should previously package and upload it to this place
+TESTS_PACKAGE_DONLOAD_URL=s3://<bucket>/<folder>/ignite-cassandra-tests-<version>.zip
+
+# Terminates script execution and upload logs to S3
+terminate()
+{
+    SUCCESS_URL=$S3_TESTS_SUCCESS
+    FAILURE_URL=$S3_TESTS_FAILURE
+
+    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
+        SUCCESS_URL=${SUCCESS_URL}/
+    fi
+
+    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
+        FAILURE_URL=${FAILURE_URL}/
+    fi
+
+    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
+    msg=$host_name
+
+    if [ -n "$1" ]; then
+        echo "[ERROR] $1"
+        echo "[ERROR]-----------------------------------------------------"
+        echo "[ERROR] Test node bootstrap failed"
+        echo "[ERROR]-----------------------------------------------------"
+        msg=$1
+
+        if [ -z "$FAILURE_URL" ]; then
+            exit 1
+        fi
+
+        reportFolder=${FAILURE_URL}${host_name}
+        reportFile=$reportFolder/__error__
+    else
+        echo "[INFO]-----------------------------------------------------"
+        echo "[INFO] Test node bootstrap successfully completed"
+        echo "[INFO]-----------------------------------------------------"
+
+        if [ -z "$SUCCESS_URL" ]; then
+            exit 0
+        fi
+
+        reportFolder=${SUCCESS_URL}${host_name}
+        reportFile=$reportFolder/__success__
+    fi
+
+    echo $msg > /opt/bootstrap-result
+
+    aws s3 rm --recursive $reportFolder
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to drop report folder: $reportFolder"
+    fi
+
+    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
+    fi
+
+    rm -f /opt/bootstrap-result
+
+    if [ -n "$1" ]; then
+        exit 1
+    fi
+
+    exit 0
+}
+
+# Downloads specified package
+downloadPackage()
+{
+    echo "[INFO] Downloading $3 package from $1 into $2"
+
+    for i in 0 1 2 3 4 5 6 7 8 9;
+    do
+        if [[ "$1" == s3* ]]; then
+            aws s3 cp $1 $2
+            code=$?
+        else
+            curl "$1" -o "$2"
+            code=$?
+        fi
+
+        if [ $code -eq 0 ]; then
+            echo "[INFO] $3 package successfully downloaded from $1 into $2"
+            return 0
+        fi
+
+        echo "[WARN] Failed to download $3 package on attempt $i, sleeping extra 5sec"
+        sleep 5s
+    done
+
+    terminate "All 10 attempts to download $3 package from $1 are failed"
+}
+
+# Downloads and setup JDK
+setupJava()
+{
+    rm -Rf /opt/java /opt/jdk.tar.gz
+
+    echo "[INFO] Downloading 'jdk'"
+    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$JDK_DOWNLOAD_URL" -O /opt/jdk.tar.gz
+    if [ $? -ne 0 ]; then
+        terminate "Failed to download 'jdk'"
+    fi
+
+    echo "[INFO] Untaring 'jdk'"
+    tar -xvzf /opt/jdk.tar.gz -C /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to untar 'jdk'"
+    fi
+
+    rm -Rf /opt/jdk.tar.gz
+
+    unzipDir=$(ls /opt | grep "jdk")
+    if [ "$unzipDir" != "java" ]; then
+        mv /opt/$unzipDir /opt/java
+    fi
+}
+
+# Downloads and setup AWS CLI
+setupAWSCLI()
+{
+    echo "[INFO] Installing 'awscli'"
+    pip install --upgrade awscli
+    if [ $? -eq 0 ]; then
+        return 0
+    fi
+
+    echo "[ERROR] Failed to install 'awscli' using pip"
+    echo "[INFO] Trying to install awscli using zip archive"
+    echo "[INFO] Downloading awscli zip"
+
+    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
+
+    echo "[INFO] Unzipping awscli zip"
+    unzip /opt/awscli-bundle.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip awscli zip"
+    fi
+
+    rm -Rf /opt/awscli-bundle.zip
+
+    echo "[INFO] Installing awscli"
+    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install awscli"
+    fi
+
+    echo "[INFO] Successfully installed awscli from zip archive"
+}
+
+# Setup all the pre-requisites (packages, settings and etc.)
+setupPreRequisites()
+{
+    echo "[INFO] Installing 'wget' package"
+    yum -y install wget
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'wget' package"
+    fi
+
+    echo "[INFO] Installing 'net-tools' package"
+    yum -y install net-tools
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'net-tools' package"
+    fi
+
+    echo "[INFO] Installing 'python' package"
+    yum -y install python
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'python' package"
+    fi
+
+    echo "[INFO] Installing 'unzip' package"
+    yum -y install unzip
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'unzip' package"
+    fi
+
+    downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
+
+    echo "[INFO] Installing 'pip'"
+    python /opt/get-pip.py
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install 'pip'"
+    fi
+}
+
+# Downloads and setup tests package
+setupTestsPackage()
+{
+    downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/ignite-cassandra-tests.zip" "Tests"
+
+    rm -Rf /opt/ignite-cassandra-tests
+
+    unzip /opt/ignite-cassandra-tests.zip -d /opt
+    if [ $? -ne 0 ]; then
+        terminate "Failed to unzip tests package"
+    fi
+
+    rm -f /opt/ignite-cassandra-tests.zip
+
+    unzipDir=$(ls /opt | grep "ignite-cassandra")
+    if [ "$unzipDir" != "ignite-cassandra-tests" ]; then
+        mv /opt/$unzipDir /opt/ignite-cassandra-tests
+    fi
+
+    find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
+
+    . /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "test"
+
+    setupNTP
+
+    echo "[INFO] Starting logs collector daemon"
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh "$S3_LOGS_TRIGGER" "$S3_TESTS_LOGS/$HOST_NAME" "/opt/ignite-cassandra-tests/logs" > /opt/logs-collector.log &
+
+    echo "[INFO] Logs collector daemon started: $!"
+
+    echo "----------------------------------------------------------------------------------------"
+    printInstanceInfo
+    echo "----------------------------------------------------------------------------------------"
+    tagInstance
+    bootstrapGangliaAgent "test" 8643
+
+    ###################################################
+    # Extra configuration specific only for test node #
+    ###################################################
+
+    echo "[INFO] Installing bc package"
+
+    yum -y install bc
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install bc package"
+    fi
+
+    echo "[INFO] Installing zip package"
+
+    yum -y install zip
+
+    if [ $? -ne 0 ]; then
+        terminate "Failed to install zip package"
+    fi
+
+    echo "[INFO] Creating 'ignite' group"
+    exists=$(cat /etc/group | grep ignite)
+    if [ -z "$exists" ]; then
+        groupadd ignite
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create 'ignite' group"
+        fi
+    fi
+
+    echo "[INFO] Creating 'ignite' user"
+    exists=$(cat /etc/passwd | grep ignite)
+    if [ -z "$exists" ]; then
+        useradd -g ignite ignite
+        if [ $? -ne 0 ]; then
+            terminate "Failed to create 'ignite' user"
+        fi
+    fi
+
+    mkdir -p /opt/ignite-cassandra-tests/logs
+    chown -R ignite:ignite /opt/ignite-cassandra-tests
+
+    echo "export JAVA_HOME=/opt/java" >> $1
+    echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> $1
+}
+
+###################################################################################################################
+
+echo "[INFO]-----------------------------------------------------------------"
+echo "[INFO] Bootstrapping Tests node"
+echo "[INFO]-----------------------------------------------------------------"
+
+setupPreRequisites
+setupJava
+setupAWSCLI
+setupTestsPackage "/root/.bash_profile"
+
+cmd="/opt/ignite-cassandra-tests/bootstrap/aws/tests/tests-manager.sh"
+
+#sudo -u ignite -g ignite sh -c "$cmd > /opt/ignite-cassandra-tests/tests-manager" &
+
+$cmd > /opt/ignite-cassandra-tests/logs/tests-manager.log &
+
+terminate
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-manager.sh b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-manager.sh
new file mode 100644
index 0000000..c0f5d6b
--- /dev/null
+++ b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-manager.sh
@@ -0,0 +1,458 @@
+#!/bin/sh
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------------------------
+# Tests manager daemon
+# -----------------------------------------------------------------------------------------------
+# Script is launched in background by all nodes of Tests cluster and
+# periodically (each 30 seconds) checks if a specific S3 trigger file was created or
+# its timestamp was changed. Such an event serves as a trigger for the script to start
+# preparing to run load tests.
+# -----------------------------------------------------------------------------------------------
+
+#profile=/home/ignite/.bash_profile
+profile=/root/.bash_profile
+
+. $profile
+. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "test"
+
+# Switch test node to IDLE state
+switchToIdleState()
+{
+    if [ "$NODE_STATE" != "IDLE" ]; then
+        echo "[INFO] Switching node to IDLE state"
+        dropStateFlag "$S3_TESTS_WAITING" "$S3_TESTS_PREPARING" "$S3_TESTS_RUNNING"
+        createStateFlag "$S3_TESTS_IDLE"
+        NODE_STATE="IDLE"
+        echo "[INFO] Node was switched to IDLE state"
+    fi
+}
+
+# Switch test node to PREPARING state
+switchToPreparingState()
+{
+    if [ "$NODE_STATE" != "PREPARING" ]; then
+        echo "[INFO] Switching node to PREPARING state"
+        dropStateFlag "$S3_TESTS_WAITING" "$S3_TESTS_IDLE" "$S3_TESTS_RUNNING"
+        createStateFlag "$S3_TESTS_PREPARING"
+        NODE_STATE="PREPARING"
+        echo "[INFO] Node was switched to PREPARING state"
+    fi
+}
+
+# Switch test node to WAITING state
+switchToWaitingState()
+{
+    if [ "$NODE_STATE" != "WAITING" ]; then
+        echo "[INFO] Switching node to WAITING state"
+        dropStateFlag "$S3_TESTS_IDLE" "$S3_TESTS_PREPARING" "$S3_TESTS_RUNNING"
+        createStateFlag "$S3_TESTS_WAITING"
+        NODE_STATE="WAITING"
+        echo "[INFO] Node was switched to WAITING state"
+    fi
+}
+
+# Switch test node to RUNNING state
+switchToRunningState()
+{
+    if [ "$NODE_STATE" != "RUNNING" ]; then
+        echo "[INFO] Switching node to RUNNING state"
+        dropStateFlag "$S3_TESTS_IDLE" "$S3_TESTS_PREPARING" "$S3_TESTS_WAITING"
+        createStateFlag "$S3_TESTS_RUNNING"
+        NODE_STATE="RUNNING"
+        echo "[INFO] Node was switched to RUNNING state"
+    fi
+}
+
+# Creates appropriate state flag for the node in S3
+createStateFlag()
+{
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    aws s3 cp --sse AES256 /etc/hosts ${1}${HOST_NAME}
+    if [ $? -ne 0 ]; then
+        terminate "Failed to create state flag: ${1}${HOST_NAME}"
+    fi
+}
+
+# Drops appropriate state flag for the node in S3
+dropStateFlag()
+{
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    for flagUrl in "$@"
+    do
+        exists=$(aws s3 ls ${flagUrl}${HOST_NAME})
+        if [ -n "$exists" ]; then
+            aws s3 rm ${flagUrl}${HOST_NAME}
+            if [ $? -ne 0 ]; then
+                terminate "Failed to drop state flag: ${flagUrl}${HOST_NAME}"
+            fi
+        fi
+    done
+}
+
+# Removes tests summary report from S3
+dropTestsSummary()
+{
+    exists=$(aws s3 ls $S3_TESTS_SUMMARY)
+    if [ -z "$exists" ]; then
+        return 0
+    fi
+
+    aws s3 rm $S3_TESTS_SUMMARY
+    if [ $? -ne 0 ]; then
+        terminate "Failed to drop tests summary info: $S3_TESTS_SUMMARY"
+    fi
+}
+
+# Recreate all the necessary Cassandra artifacts before running Load tests
+recreateCassandraArtifacts()
+{
+    /opt/ignite-cassandra-tests/recreate-cassandra-artifacts.sh
+    if [ $? -ne 0 ]; then
+        terminate "Failed to recreate Cassandra artifacts"
+    fi
+}
+
+# Sets up Cassandra seeds so this Tests node is able to connect to Cassandra.
+# Looks for the information in S3 about already up and running Cassandra cluster nodes.
+setupCassandraSeeds()
+{
+    if [ $CASSANDRA_NODES_COUNT -eq 0 ]; then
+        return 0
+    fi
+
+    setupClusterSeeds "cassandra"
+
+    CASSANDRA_SEEDS1=$(echo $CLUSTER_SEEDS | sed -r "s/ /,/g")
+    CASSANDRA_SEEDS2=
+
+    CLUSTER_SEEDS=($CLUSTER_SEEDS)
+	count=${#CLUSTER_SEEDS[@]}
+
+	for (( i=0; i<=$(( $count -1 )); i++ ))
+	do
+		seed=${CLUSTER_SEEDS[$i]}
+        CASSANDRA_SEEDS2="${CASSANDRA_SEEDS2}<value>$seed<\/value>"
+	done
+
+    echo "[INFO] Using Cassandra seeds: $CASSANDRA_SEEDS1"
+
+    echo "contact.points=$CASSANDRA_SEEDS1" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/connection.properties
+
+    cat /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template.xml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS2/g" > /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml
+}
+
+# Sets up Ignite seeds so this Tests node is able to connect to Ignite.
+# Looks for the information in S3 about already up and running Ignite cluster nodes.
+setupIgniteSeeds()
+{
+    if [ $IGNITE_NODES_COUNT -eq 0 ]; then
+        return 0
+    fi
+
+    setupClusterSeeds "ignite"
+
+    CLUSTER_SEEDS=($CLUSTER_SEEDS)
+	count=${#CLUSTER_SEEDS[@]}
+
+    IGNITE_SEEDS=
+
+	for (( i=0; i<=$(( $count -1 )); i++ ))
+	do
+		seed=${CLUSTER_SEEDS[$i]}
+        IGNITE_SEEDS="${IGNITE_SEEDS}<value>$seed<\/value>"
+	done
+
+    echo "[INFO] Using Ignite seeds: $IGNITE_SEEDS"
+
+    cat /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml | sed -r "s/\\\$\{IGNITE_SEEDS\}/$IGNITE_SEEDS/g" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
+    rm -f /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml
+}
+
+# Sets up Cassandra credentials to connect to the Cassandra cluster
+setupCassandraCredentials()
+{
+    echo "admin.user=cassandra" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
+    echo "admin.password=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
+    echo "regular.user=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
+    echo "regular.password=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
+}
+
+# Triggering first time tests execution for all nodes in the Tests cluster
+triggerFirstTimeTestsExecution()
+{
+    if [ -z "$TESTS_TYPE" ]; then
+        return 0
+    fi
+
+    tryToGetFirstNodeLock
+    if [ $? -ne 0 ]; then
+        return 0
+    fi
+
+    sleep 30s
+
+    echo "[INFO] Triggering first time tests execution"
+
+    echo "TESTS_TYPE=$TESTS_TYPE" > /opt/ignite-cassandra-tests/tests-trigger
+    echo "#--------------------------------------------------" >> /opt/ignite-cassandra-tests/tests-trigger
+    echo "" >> /opt/ignite-cassandra-tests/tests-trigger
+    cat /opt/ignite-cassandra-tests/settings/tests.properties >> /opt/ignite-cassandra-tests/tests-trigger
+
+    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/tests-trigger $S3_TESTS_TRIGGER
+    code=$?
+
+    rm -f /opt/ignite-cassandra-tests/tests-trigger
+
+    if [ $code -ne 0 ]; then
+        terminate "Failed to create tests trigger: $S3_TESTS_TRIGGER"
+    fi
+}
+
+# Cleans previously created logs from S3
+cleanPreviousLogs()
+{
+	for logFile in /opt/ignite-cassandra-tests/logs/*
+	do
+	    managerLog=$(echo $logFile | grep "tests-manager")
+	    if [ -z "$managerLog" ]; then
+	        rm -Rf $logFile
+	    fi
+	done
+
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+	aws s3 rm --recursive ${S3_TESTS_FAILURE}${HOST_NAME}
+	aws s3 rm --recursive ${S3_TESTS_SUCCESS}${HOST_NAME}
+}
+
+# Uploads tests logs to S3
+uploadTestsLogs()
+{
+    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
+
+    if [ -f "/opt/ignite-cassandra-tests/logs/__success__" ]; then
+        logsFolder=${S3_TESTS_SUCCESS}${HOST_NAME}
+    else
+        logsFolder=${S3_TESTS_FAILURE}${HOST_NAME}
+    fi
+
+    aws s3 rm --recursive $logsFolder
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to drop logs folder: $logsFolder"
+    fi
+
+    if [ -d "/opt/ignite-cassandra-tests/logs" ]; then
+        aws s3 sync --sse AES256 /opt/ignite-cassandra-tests/logs $logsFolder
+        if [ $? -ne 0 ]; then
+            echo "[ERROR] Failed to export tests logs to: $logsFolder"
+        fi
+    fi
+}
+
+# Runs tests-report.sh to prepare tests summary report
+buildTestsSummaryReport()
+{
+    reportScript=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/tests-report.sh)
+    $reportScript
+
+    if [ -n "$S3_LOGS_TRIGGER" ]; then
+        aws s3 cp --sse AES256 /etc/hosts $S3_LOGS_TRIGGER
+        if [ $? -ne 0 ]; then
+            echo "[ERROR] Failed to trigger logs collection"
+        fi
+    fi
+}
+
+# Running load tests
+runLoadTests()
+{
+    cd /opt/ignite-cassandra-tests
+
+    if [ "$TESTS_TYPE" == "ignite" ]; then
+        echo "[INFO] Running Ignite load tests"
+        ./ignite-load-tests.sh &
+    else
+        echo "[INFO] Running Cassandra load tests"
+        ./cassandra-load-tests.sh &
+    fi
+
+    testsJobId=$!
+
+    echo "[INFO] Tests job id: $testsJobId"
+
+    sleep 1m
+
+    LOGS_SNAPSHOT=$(ls -al /opt/ignite-cassandra-tests/logs)
+    LOGS_SNAPSHOT_TIME=$(date +%s)
+
+    TERMINATED=
+
+    # tests monitoring
+    while true; do
+        proc=$(ps -ef | grep java | grep "org.apache.ignite.tests")
+        if [ -z "$proc" ]; then
+            break
+        fi
+
+        NEW_LOGS_SNAPSHOT=$(ls -al /opt/ignite-cassandra-tests/logs)
+        NEW_LOGS_SNAPSHOT_TIME=$(date +%s)
+
+        # if logs state updated it means that tests are running and not stuck
+        if [ "$LOGS_SNAPSHOT" != "$NEW_LOGS_SNAPSHOT" ]; then
+            LOGS_SNAPSHOT=$NEW_LOGS_SNAPSHOT
+            LOGS_SNAPSHOT_TIME=$NEW_LOGS_SNAPSHOT_TIME
+            continue
+        fi
+
+        duration=$(( $NEW_LOGS_SNAPSHOT_TIME-$LOGS_SNAPSHOT_TIME ))
+        duration=$(( $duration/60 ))
+
+        # if logs wasn't updated during 5min it means that load tests stuck
+        if [ $duration -gt 5 ]; then
+            proc=($proc)
+            kill -9 ${proc[1]}
+            TERMINATED="true"
+            break
+        fi
+
+        echo "[INFO] Waiting extra 30sec for load tests to complete"
+
+        sleep 30s
+    done
+
+    rm -f /opt/ignite-cassandra-tests/logs/tests.properties
+    cp /opt/ignite-cassandra-tests/settings/tests.properties /opt/ignite-cassandra-tests/logs
+
+    if [ "$TERMINATED" == "true" ]; then
+        echo "[ERROR] Load tests stuck, tests process terminated"
+        echo "Load tests stuck, tests process terminated" > /opt/ignite-cassandra-tests/logs/__error__
+        return 0
+    fi
+
+    failed=
+    if [ "$TESTS_TYPE" == "cassandra" ]; then
+        failed=$(cat /opt/ignite-cassandra-tests/cassandra-load-tests.log | grep "load tests execution failed")
+    else
+        failed=$(cat /opt/ignite-cassandra-tests/ignite-load-tests.log | grep "load tests execution failed")
+    fi
+
+    if [ -n "$failed" ]; then
+        echo "[ERROR] Load tests execution failed"
+        echo "Load tests execution failed" > /opt/ignite-cassandra-tests/logs/__error__
+    else
+        echo "[INFO] Load tests execution successfully completed"
+        echo "Load tests execution successfully completed" > /opt/ignite-cassandra-tests/logs/__success__
+    fi
+}
+
+#######################################################################################################
+
+sleep 1m
+
+NODE_STATE=
+TRIGGER_STATE=
+
+printInstanceInfo
+setupCassandraCredentials
+switchToIdleState
+
+triggerFirstTimeTestsExecution
+
+registerNode
+
+while true; do
+    # switching state to IDLE
+    switchToIdleState
+
+    sleep 30s
+
+    NEW_TRIGGER_STATE=$(aws s3 ls $S3_TESTS_TRIGGER | xargs)
+    if [ -z "$NEW_TRIGGER_STATE" ] || [ "$NEW_TRIGGER_STATE" == "$TRIGGER_STATE" ]; then
+        continue
+    fi
+
+    echo "----------------------------------------------------------------------"
+    echo "[INFO] Tests trigger changed"
+    echo "----------------------------------------------------------------------"
+    echo "[INFO] Old trigger: $TRIGGER_STATE"
+    echo "----------------------------------------------------------------------"
+    echo "[INFO] New trigger: $NEW_TRIGGER_STATE"
+    echo "----------------------------------------------------------------------"
+
+    TRIGGER_STATE=$NEW_TRIGGER_STATE
+
+    aws s3 cp $S3_TESTS_TRIGGER /opt/ignite-cassandra-tests/tests-trigger
+    if [ $? -ne 0 ]; then
+        echo "[ERROR] Failed to download tests trigger info from: $S3_TESTS_TRIGGER"
+        continue
+    fi
+
+    TESTS_TYPE=$(cat /opt/ignite-cassandra-tests/tests-trigger | grep TESTS_TYPE | xargs | sed -r "s/TESTS_TYPE=//g")
+    if [ "$TESTS_TYPE" != "ignite" ] && [ "$TESTS_TYPE" != "cassandra" ]; then
+        rm -f /opt/ignite-cassandra-tests/tests-trigger
+        echo "[ERROR] Incorrect tests type specified in the trigger info: $S3_TESTS_TRIGGER"
+        continue
+    fi
+
+    rm -f /opt/ignite-cassandra-tests/settings/tests.properties
+    mv -f /opt/ignite-cassandra-tests/tests-trigger /opt/ignite-cassandra-tests/settings/tests.properties
+	
+	waitAllTestNodesCompletedTests
+	
+    # switching state to PREPARING
+    switchToPreparingState
+
+    waitAllClusterNodesReady "cassandra"
+    waitAllClusterNodesReady "ignite"
+    setupCassandraSeeds
+    setupIgniteSeeds
+	
+	cleanPreviousLogs
+
+    tryToGetFirstNodeLock
+    if [ $? -eq 0 ]; then
+        dropTestsSummary
+        recreateCassandraArtifacts
+    fi
+
+    # switching state to WAITING
+    switchToWaitingState
+
+    waitAllClusterNodesReady "test"
+
+    if [ "$FIRST_NODE_LOCK" == "true" ]; then
+        aws s3 rm $S3_TESTS_TRIGGER
+    fi
+
+    # switching state to RUNNING
+    switchToRunningState
+
+    runLoadTests
+    uploadTestsLogs
+
+    tryToGetFirstNodeLock
+    if [ $? -eq 0 ]; then
+        waitAllTestNodesCompletedTests
+        buildTestsSummaryReport
+        removeFirstNodeLock
+    fi
+done
\ No newline at end of file
diff --git a/modules/cassandra/src/test/bootstrap/aws/tests/tests-report.sh b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-report.sh
similarity index 83%
rename from modules/cassandra/src/test/bootstrap/aws/tests/tests-report.sh
rename to modules/cassandra/store/src/test/bootstrap/aws/tests/tests-report.sh
index 762dc6f..1576d57 100644
--- a/modules/cassandra/src/test/bootstrap/aws/tests/tests-report.sh
+++ b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-report.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
@@ -17,118 +17,20 @@
 # limitations under the License.
 #
 
+# -----------------------------------------------------------------------------------------------
+# Tests report builder
+# -----------------------------------------------------------------------------------------------
+# Script is used to analyze load tests logs collected from all 'Tests' cluster nodes and build
+# summary report
+# -----------------------------------------------------------------------------------------------
+
 #profile=/home/ignite/.bash_profile
 profile=/root/.bash_profile
 
 . $profile
+. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "test"
 
-validate()
-{
-    if [ -z "$TESTS_TYPE" ]; then
-        terminate "Tests type 'ignite' or 'cassandra' should be specified"
-    fi
-
-    if [ "$TESTS_TYPE" != "ignite" ] && [ "$TESTS_TYPE" != "cassandra" ]; then
-        terminate "Incorrect tests type specified: $TESTS_TYPE"
-    fi
-
-    if [ -z "$S3_TESTS_SUCCESS_URL" ]; then
-        terminate "Tests success URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_SUCCESS_URL" != */ ]]; then
-        S3_TESTS_SUCCESS_URL=${S3_TESTS_SUCCESS_URL}/
-    fi
-
-    if [ -z "$S3_TESTS_FAILURE_URL" ]; then
-        terminate "Tests failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_FAILURE_URL" != */ ]]; then
-        S3_TESTS_FAILURE_URL=${S3_TESTS_FAILURE_URL}/
-    fi
-
-    if [ -z "$S3_TESTS_RUNNING_URL" ]; then
-        terminate "Tests running URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_RUNNING_URL" != */ ]]; then
-        S3_TESTS_RUNNING_URL=${S3_TESTS_RUNNING_URL}/
-    fi
-
-    if [ -z "$S3_TESTS_WAITING_URL" ]; then
-        terminate "Tests waiting URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_WAITING_URL" != */ ]]; then
-        S3_TESTS_WAITING_URL=${S3_TESTS_WAITING_URL}/
-    fi
-
-    if [ -z "$S3_IGNITE_SUCCESS_URL" ]; then
-        terminate "Ignite success URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_SUCCESS_URL" != */ ]]; then
-        S3_IGNITE_SUCCESS_URL=${S3_IGNITE_SUCCESS_URL}/
-    fi
-
-    if [ -z "$S3_IGNITE_FAILURE_URL" ]; then
-        terminate "Ignite failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_FAILURE_URL" != */ ]]; then
-        S3_IGNITE_FAILURE_URL=${S3_IGNITE_FAILURE_URL}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_SUCCESS_URL" ]; then
-        terminate "Cassandra success URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_SUCCESS_URL" != */ ]]; then
-        S3_CASSANDRA_SUCCESS_URL=${S3_CASSANDRA_SUCCESS_URL}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_FAILURE_URL" ]; then
-        terminate "Cassandra failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_FAILURE_URL" != */ ]]; then
-        S3_CASSANDRA_FAILURE_URL=${S3_CASSANDRA_FAILURE_URL}/
-    fi
-
-    if [ -z "$S3_TEST_NODES_DISCOVERY_URL" ]; then
-        terminate "Tests S3 discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_TEST_NODES_DISCOVERY_URL" != */ ]]; then
-        S3_TEST_NODES_DISCOVERY_URL=${S3_TEST_NODES_DISCOVERY_URL}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_NODES_DISCOVERY_URL" ]; then
-        terminate "Cassandra S3 discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
-        S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
-    fi
-
-    if [ -z "$S3_IGNITE_NODES_DISCOVERY_URL" ]; then
-        terminate "Ignite S3 discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
-        S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
-    fi
-
-    if [ -z "$S3_IGNITE_NODES_DISCOVERY_URL" ]; then
-        terminate "Ignite S3 discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_NODES_DISCOVERY_URL" != */ ]]; then
-        S3_IGNITE_NODES_DISCOVERY_URL=${S3_IGNITE_NODES_DISCOVERY_URL}/
-    fi
-}
-
+# Building tests summary report
 reportTestsSummary()
 {
     echo "[INFO] Preparing tests results summary"
@@ -145,8 +47,8 @@
     mkdir -p $SUCCEED_NODES_DIR
     mkdir -p $FAILED_NODES_DIR
 
-    aws s3 ls $S3_TESTS_SUCCESS_URL | sed -r "s/PRE //g" | sed -r "s/ //g" | sed -r "s/\///g" > $SUCCEED_NODES_FILE
-    aws s3 ls $S3_TESTS_FAILURE_URL | sed -r "s/PRE //g" | sed -r "s/ //g" | sed -r "s/\///g" > $FAILED_NODES_FILE
+    aws s3 ls $S3_TESTS_SUCCESS | sed -r "s/PRE //g" | sed -r "s/ //g" | sed -r "s/\///g" > $SUCCEED_NODES_FILE
+    aws s3 ls $S3_TESTS_FAILURE | sed -r "s/PRE //g" | sed -r "s/ //g" | sed -r "s/\///g" > $FAILED_NODES_FILE
 
     succeedCount=$(cat $SUCCEED_NODES_FILE | wc -l)
     failedCount=$(cat $FAILED_NODES_FILE | wc -l)
@@ -164,7 +66,7 @@
         cat $SUCCEED_NODES_FILE >> $REPORT_FILE
         echo "----------------------------------------------------------------------------------------------" >> $REPORT_FILE
 
-        aws s3 sync --delete $S3_TESTS_SUCCESS_URL $SUCCEED_NODES_DIR
+        aws s3 sync --delete $S3_TESTS_SUCCESS $SUCCEED_NODES_DIR
         if [ $? -ne 0 ]; then
             echo "[ERROR] Failed to get succeed tests details"
         else
@@ -178,7 +80,7 @@
         cat $FAILED_NODES_FILE >> $REPORT_FILE
         echo "----------------------------------------------------------------------------------------------" >> $REPORT_FILE
 
-        aws sync --delete $S3_TESTS_FAILURE_URL $FAILED_NODES_DIR
+        aws s3 sync --delete $S3_TESTS_FAILURE $FAILED_NODES_DIR
         if [ $? -ne 0 ]; then
             echo "[ERROR] Failed to get failed tests details"
         else
@@ -204,34 +106,35 @@
         return 1
     fi
 
-    aws s3 cp --sse AES256 $HOME/tests-summary.zip $S3_TESTS_SUMMARY_URL
+    aws s3 cp --sse AES256 $HOME/tests-summary.zip $S3_TESTS_SUMMARY
     if [ $? -ne 0 ]; then
         echo "-------------------------------------------------------------------------------------"
-        echo "[ERROR] Failed to uploat tests summary archive to: $S3_TESTS_SUMMARY_URL"
+        echo "[ERROR] Failed to uploat tests summary archive to: $S3_TESTS_SUMMARY"
         echo "-------------------------------------------------------------------------------------"
     else
         echo "-------------------------------------------------------------------------------------"
-        echo "[INFO] Tests results summary uploaded to: $S3_TESTS_SUMMARY_URL"
+        echo "[INFO] Tests results summary uploaded to: $S3_TESTS_SUMMARY"
         echo "-------------------------------------------------------------------------------------"
     fi
 
     rm -f $HOME/tests-summary.zip
 }
 
+# Creates report for succeed tests
 reportSucceedTestsStatistics()
 {
-    writeMsg=0
-    writeErrors=0
-    writeSpeed=0
-    blkWriteMsg=0
-    blkWriteErrors=0
-    blkWriteSpeed=0
-    readMsg=0
-    readErrors=0
-    readSpeed=0
-    blkReadMsg=0
-    blkReadErrors=0
-    blkReadSpeed=0
+    writeMsg="0"
+    writeErrors="0"
+    writeSpeed="0"
+    blkWriteMsg="0"
+    blkWriteErrors="0"
+    blkWriteSpeed="0"
+    readMsg="0"
+    readErrors="0"
+    readSpeed="0"
+    blkReadMsg="0"
+    blkReadErrors="0"
+    blkReadSpeed="0"
 
     writeErrNodes=
     blkWriteErrNodes=
@@ -271,7 +174,7 @@
 
         cnt=$(cat $logFile | grep "^WRITE messages" | sed -r "s/WRITE messages: //g" | xargs)
         if [ -n "$cnt" ]; then
-            writeMsg=$(( $writeMsg+$cnt ))
+            writeMsg=$(bc <<< "$writeMsg + $cnt")
             if [ $cnt -ne 0 ]; then
                 echo "[INFO] WRITE messages: $cnt"
             else
@@ -292,7 +195,7 @@
         cnt=$(cat $logFile | grep "^WRITE errors" | sed -r "s/WRITE errors: //g" | sed -r "s/,.*//g" | xargs)
         if [ -n "$cnt" ]; then
             echo "[INFO] WRITE errors: $cnt"
-            writeErrors=$(( $writeErrors+$cnt ))
+            writeErrors=$(bc <<< "$writeErrors + $cnt")
             if [ $cnt -ne 0 ]; then
                 if [ -n "$writeErrNodes" ]; then
                     writeErrNodes="${writeErrNodes}, "
@@ -309,7 +212,7 @@
 
         cnt=$(cat $logFile | grep "^WRITE speed" | sed -r "s/WRITE speed: //g" | sed -r "s/ msg\/sec//g" | xargs)
         if [ -n "$cnt" ]; then
-            writeSpeed=$(( $writeSpeed+$cnt ))
+            writeSpeed=$(bc <<< "$writeSpeed + $cnt")
             if [ $cnt -ne 0 ]; then
                 echo "[INFO] WRITE speed: $cnt msg/sec"
             else
@@ -329,7 +232,7 @@
 
         cnt=$(cat $logFile | grep "^BULK_WRITE messages" | sed -r "s/BULK_WRITE messages: //g" | xargs)
         if [ -n "$cnt" ]; then
-            blkWriteMsg=$(( $blkWriteMsg+$cnt ))
+            blkWriteMsg=$(bc <<< "$blkWriteMsg + $cnt")
             if [ $cnt -ne 0 ]; then
                 echo "[INFO] BULK_WRITE messages: $cnt"
             else
@@ -349,7 +252,7 @@
 
         cnt=$(cat $logFile | grep "^BULK_WRITE errors" | sed -r "s/BULK_WRITE errors: //g" | sed -r "s/,.*//g" | xargs)
         if [ -n "$cnt" ]; then
-            blkWriteErrors=$(( $blkWriteErrors+$cnt ))
+            blkWriteErrors=$(bc <<< "$blkWriteErrors + $cnt")
             echo "[INFO] BULK_WRITE errors: $cnt"
             if [ $cnt -ne 0 ]; then
                 if [ -n "$blkWriteErrNodes" ]; then
@@ -367,7 +270,7 @@
 
         cnt=$(cat $logFile | grep "^BULK_WRITE speed" | sed -r "s/BULK_WRITE speed: //g" | sed -r "s/ msg\/sec//g" | xargs)
         if [ -n "$cnt" ]; then
-            blkWriteSpeed=$(( $blkWriteSpeed+$cnt ))
+            blkWriteSpeed=$(bc <<< "$blkWriteSpeed + $cnt")
             if [ $cnt -ne 0 ]; then
                 echo "[INFO] BULK_WRITE speed: $cnt msg/sec"
             else
@@ -387,7 +290,7 @@
 
         cnt=$(cat $logFile | grep "^READ messages" | sed -r "s/READ messages: //g" | xargs)
         if [ -n "$cnt" ]; then
-            readMsg=$(( $readMsg+$cnt ))
+            readMsg=$(bc <<< "$readMsg + $cnt")
             if [ $cnt -ne 0 ]; then
                 echo "[INFO] READ messages: $cnt"
             else
@@ -407,7 +310,7 @@
 
         cnt=$(cat $logFile | grep "^READ errors" | sed -r "s/READ errors: //g" | sed -r "s/,.*//g" | xargs)
         if [ -n "$cnt" ]; then
-            readErrors=$(( $readErrors+$cnt ))
+            readErrors=$(bc <<< "$readErrors + $cnt")
             echo "[INFO] READ errors: $cnt"
             if [ $cnt -ne 0 ]; then
                 if [ -n "$readErrNodes" ]; then
@@ -425,7 +328,7 @@
 
         cnt=$(cat $logFile | grep "^READ speed" | sed -r "s/READ speed: //g" | sed -r "s/ msg\/sec//g" | xargs)
         if [ -n "$cnt" ]; then
-            readSpeed=$(( $readSpeed+$cnt ))
+            readSpeed=$(bc <<< "$readSpeed + $cnt")
             if [ $cnt -ne 0 ]; then
                 echo "[INFO] READ speed: $cnt msg/sec"
             else
@@ -445,7 +348,7 @@
 
         cnt=$(cat $logFile | grep "^BULK_READ messages" | sed -r "s/BULK_READ messages: //g" | xargs)
         if [ -n "$cnt" ]; then
-            blkReadMsg=$(( $blkReadMsg+$cnt ))
+            blkReadMsg=$(bc <<< "$blkReadMsg + $cnt")
             if [ $cnt -ne 0 ]; then
                 echo "[INFO] BULK_READ messages: $cnt"
             else
@@ -465,7 +368,7 @@
 
         cnt=$(cat $logFile | grep "^BULK_READ errors" | sed -r "s/BULK_READ errors: //g" | sed -r "s/,.*//g" | xargs)
         if [ -n "$cnt" ]; then
-            blkReadErrors=$(( $blkReadErrors+$cnt ))
+            blkReadErrors=$(bc <<< "$blkReadErrors + $cnt")
             echo "[INFO] BULK_READ errors: $cnt"
             if [ $cnt -ne 0 ]; then
                 if [ -n "$blkReadErrNodes" ]; then
@@ -483,7 +386,7 @@
 
         cnt=$(cat $logFile | grep "^BULK_READ speed" | sed -r "s/BULK_READ speed: //g" | sed -r "s/ msg\/sec//g" | xargs)
         if [ -n "$cnt" ]; then
-            blkReadSpeed=$(( $blkReadSpeed+$cnt ))
+            blkReadSpeed=$(bc <<< "$blkReadSpeed + $cnt")
             if [ $cnt -ne 0 ]; then
                 echo "[INFO] BULK_READ speed: $cnt msg/sec"
             else
@@ -565,6 +468,7 @@
     rm -f $tmpFile
 }
 
+# Creates report for failed tests
 reportFailedTestsDetailes()
 {
     for dir in $2/*
@@ -586,5 +490,10 @@
     done
 }
 
-validate
+#######################################################################################################
+
+if [ "$TESTS_TYPE" != "ignite" ] && [ "$TESTS_TYPE" != "cassandra" ]; then
+    terminate "Incorrect tests type specified: $TESTS_TYPE"
+fi
+
 reportTestsSummary
\ No newline at end of file
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceLoadTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceLoadTest.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceLoadTest.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceLoadTest.java
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java
new file mode 100644
index 0000000..f9e9649
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java
@@ -0,0 +1,696 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import org.apache.ignite.cache.store.CacheStore;
+import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.tests.pojos.Person;
+import org.apache.ignite.tests.pojos.PersonId;
+import org.apache.ignite.tests.pojos.Product;
+import org.apache.ignite.tests.pojos.ProductOrder;
+import org.apache.ignite.tests.utils.CacheStoreHelper;
+import org.apache.ignite.tests.utils.CassandraHelper;
+import org.apache.ignite.tests.utils.TestCacheSession;
+import org.apache.ignite.tests.utils.TestTransaction;
+import org.apache.ignite.tests.utils.TestsHelper;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.log4j.Logger;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.springframework.core.io.ClassPathResource;
+
+/**
+ * Unit tests for {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore} implementation of
+ * {@link org.apache.ignite.cache.store.CacheStore} which allows to store Ignite cache data into Cassandra tables.
+ */
+public class CassandraDirectPersistenceTest {
+    /** */
+    private static final Logger LOGGER = Logger.getLogger(CassandraDirectPersistenceTest.class.getName());
+
+    /** */
+    @BeforeClass
+    public static void setUpClass() {
+        if (CassandraHelper.useEmbeddedCassandra()) {
+            try {
+                CassandraHelper.startEmbeddedCassandra(LOGGER);
+            }
+            catch (Throwable e) {
+                throw new RuntimeException("Failed to start embedded Cassandra instance", e);
+            }
+        }
+
+        LOGGER.info("Testing admin connection to Cassandra");
+        CassandraHelper.testAdminConnection();
+
+        LOGGER.info("Testing regular connection to Cassandra");
+        CassandraHelper.testRegularConnection();
+
+        LOGGER.info("Dropping all artifacts from previous tests execution session");
+        CassandraHelper.dropTestKeyspaces();
+
+        LOGGER.info("Start tests execution");
+    }
+
+    /** */
+    @AfterClass
+    public static void tearDownClass() {
+        try {
+            CassandraHelper.dropTestKeyspaces();
+        }
+        finally {
+            CassandraHelper.releaseCassandraResources();
+
+            if (CassandraHelper.useEmbeddedCassandra()) {
+                try {
+                    CassandraHelper.stopEmbeddedCassandra();
+                }
+                catch (Throwable e) {
+                    LOGGER.error("Failed to stop embedded Cassandra instance", e);
+                }
+            }
+        }
+    }
+
+    /** */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void primitiveStrategyTest() {
+        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore store2 = CacheStoreHelper.createCacheStore("stringTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
+        Collection<CacheEntryImpl<String, String>> strEntries = TestsHelper.generateStringsEntries();
+
+        Collection<Long> fakeLongKeys = TestsHelper.getKeys(longEntries);
+        fakeLongKeys.add(-1L);
+        fakeLongKeys.add(-2L);
+        fakeLongKeys.add(-3L);
+        fakeLongKeys.add(-4L);
+
+        Collection<String> fakeStrKeys = TestsHelper.getKeys(strEntries);
+        fakeStrKeys.add("-1");
+        fakeStrKeys.add("-2");
+        fakeStrKeys.add("-3");
+        fakeStrKeys.add("-4");
+
+        LOGGER.info("Running PRIMITIVE strategy write tests");
+
+        LOGGER.info("Running single write operation tests");
+        store1.write(longEntries.iterator().next());
+        store2.write(strEntries.iterator().next());
+        LOGGER.info("Single write operation tests passed");
+
+        LOGGER.info("Running bulk write operation tests");
+        store1.writeAll(longEntries);
+        store2.writeAll(strEntries);
+        LOGGER.info("Bulk write operation tests passed");
+
+        LOGGER.info("PRIMITIVE strategy write tests passed");
+
+        LOGGER.info("Running PRIMITIVE strategy read tests");
+
+        LOGGER.info("Running single read operation tests");
+
+        LOGGER.info("Running real keys read tests");
+
+        Long longVal = (Long)store1.load(longEntries.iterator().next().getKey());
+        if (!longEntries.iterator().next().getValue().equals(longVal))
+            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
+
+        String strVal = (String)store2.load(strEntries.iterator().next().getKey());
+        if (!strEntries.iterator().next().getValue().equals(strVal))
+            throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
+
+        LOGGER.info("Running fake keys read tests");
+
+        longVal = (Long)store1.load(-1L);
+        if (longVal != null)
+            throw new RuntimeException("Long value with fake key '-1' was found in Cassandra");
+
+        strVal = (String)store2.load("-1");
+        if (strVal != null)
+            throw new RuntimeException("String value with fake key '-1' was found in Cassandra");
+
+        LOGGER.info("Single read operation tests passed");
+
+        LOGGER.info("Running bulk read operation tests");
+
+        LOGGER.info("Running real keys read tests");
+
+        Map longValues = store1.loadAll(TestsHelper.getKeys(longEntries));
+        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
+            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
+
+        Map strValues = store2.loadAll(TestsHelper.getKeys(strEntries));
+        if (!TestsHelper.checkCollectionsEqual(strValues, strEntries))
+            throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
+
+        LOGGER.info("Running fake keys read tests");
+
+        longValues = store1.loadAll(fakeLongKeys);
+        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
+            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
+
+        strValues = store2.loadAll(fakeStrKeys);
+        if (!TestsHelper.checkCollectionsEqual(strValues, strEntries))
+            throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
+
+        LOGGER.info("Bulk read operation tests passed");
+
+        LOGGER.info("PRIMITIVE strategy read tests passed");
+
+        LOGGER.info("Running PRIMITIVE strategy delete tests");
+
+        LOGGER.info("Deleting real keys");
+
+        store1.delete(longEntries.iterator().next().getKey());
+        store1.deleteAll(TestsHelper.getKeys(longEntries));
+
+        store2.delete(strEntries.iterator().next().getKey());
+        store2.deleteAll(TestsHelper.getKeys(strEntries));
+
+        LOGGER.info("Deleting fake keys");
+
+        store1.delete(-1L);
+        store2.delete("-1");
+
+        store1.deleteAll(fakeLongKeys);
+        store2.deleteAll(fakeStrKeys);
+
+        LOGGER.info("PRIMITIVE strategy delete tests passed");
+    }
+
+    /** */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void blobStrategyTest() {
+        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore store2 = CacheStoreHelper.createCacheStore("personTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore store3 = CacheStoreHelper.createCacheStore("personTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
+        Collection<CacheEntryImpl<Long, Person>> personEntries = TestsHelper.generateLongsPersonsEntries();
+
+        LOGGER.info("Running BLOB strategy write tests");
+
+        LOGGER.info("Running single write operation tests");
+        store1.write(longEntries.iterator().next());
+        store2.write(personEntries.iterator().next());
+        store3.write(personEntries.iterator().next());
+        LOGGER.info("Single write operation tests passed");
+
+        LOGGER.info("Running bulk write operation tests");
+        store1.writeAll(longEntries);
+        store2.writeAll(personEntries);
+        store3.writeAll(personEntries);
+        LOGGER.info("Bulk write operation tests passed");
+
+        LOGGER.info("BLOB strategy write tests passed");
+
+        LOGGER.info("Running BLOB strategy read tests");
+
+        LOGGER.info("Running single read operation tests");
+
+        Long longVal = (Long)store1.load(longEntries.iterator().next().getKey());
+        if (!longEntries.iterator().next().getValue().equals(longVal))
+            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
+
+        Person personVal = (Person)store2.load(personEntries.iterator().next().getKey());
+        if (!personEntries.iterator().next().getValue().equals(personVal))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        personVal = (Person)store3.load(personEntries.iterator().next().getKey());
+        if (!personEntries.iterator().next().getValue().equals(personVal))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        LOGGER.info("Single read operation tests passed");
+
+        LOGGER.info("Running bulk read operation tests");
+
+        Map longValues = store1.loadAll(TestsHelper.getKeys(longEntries));
+        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
+            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
+
+        Map personValues = store2.loadAll(TestsHelper.getKeys(personEntries));
+        if (!TestsHelper.checkPersonCollectionsEqual(personValues, personEntries, false))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        personValues = store3.loadAll(TestsHelper.getKeys(personEntries));
+        if (!TestsHelper.checkPersonCollectionsEqual(personValues, personEntries, false))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        LOGGER.info("Bulk read operation tests passed");
+
+        LOGGER.info("BLOB strategy read tests passed");
+
+        LOGGER.info("Running BLOB strategy delete tests");
+
+        store1.delete(longEntries.iterator().next().getKey());
+        store1.deleteAll(TestsHelper.getKeys(longEntries));
+
+        store2.delete(personEntries.iterator().next().getKey());
+        store2.deleteAll(TestsHelper.getKeys(personEntries));
+
+        store3.delete(personEntries.iterator().next().getKey());
+        store3.deleteAll(TestsHelper.getKeys(personEntries));
+
+        LOGGER.info("BLOB strategy delete tests passed");
+    }
+
+    /** */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void pojoStrategyTest() {
+        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore store2 = CacheStoreHelper.createCacheStore("personTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore store3 = CacheStoreHelper.createCacheStore("personTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore store4 = CacheStoreHelper.createCacheStore("persons",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore productStore = CacheStoreHelper.createCacheStore("product",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore orderStore = CacheStoreHelper.createCacheStore("order",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        Collection<CacheEntryImpl<Long, Person>> entries1 = TestsHelper.generateLongsPersonsEntries();
+        Collection<CacheEntryImpl<PersonId, Person>> entries2 = TestsHelper.generatePersonIdsPersonsEntries();
+        Collection<CacheEntryImpl<PersonId, Person>> entries3 = TestsHelper.generatePersonIdsPersonsEntries();
+        Collection<CacheEntryImpl<Long, Product>> productEntries = TestsHelper.generateProductEntries();
+        Collection<CacheEntryImpl<Long, ProductOrder>> orderEntries = TestsHelper.generateOrderEntries();
+
+        LOGGER.info("Running POJO strategy write tests");
+
+        LOGGER.info("Running single write operation tests");
+        store1.write(entries1.iterator().next());
+        store2.write(entries2.iterator().next());
+        store3.write(entries3.iterator().next());
+        store4.write(entries3.iterator().next());
+        productStore.write(productEntries.iterator().next());
+        orderStore.write(orderEntries.iterator().next());
+        LOGGER.info("Single write operation tests passed");
+
+        LOGGER.info("Running bulk write operation tests");
+        store1.writeAll(entries1);
+        store2.writeAll(entries2);
+        store3.writeAll(entries3);
+        store4.writeAll(entries3);
+        productStore.writeAll(productEntries);
+        orderStore.writeAll(orderEntries);
+        LOGGER.info("Bulk write operation tests passed");
+
+        LOGGER.info("POJO strategy write tests passed");
+
+        LOGGER.info("Running POJO strategy read tests");
+
+        LOGGER.info("Running single read operation tests");
+
+        Person person = (Person)store1.load(entries1.iterator().next().getKey());
+        if (!entries1.iterator().next().getValue().equalsPrimitiveFields(person))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        person = (Person)store2.load(entries2.iterator().next().getKey());
+        if (!entries2.iterator().next().getValue().equalsPrimitiveFields(person))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        person = (Person)store3.load(entries3.iterator().next().getKey());
+        if (!entries3.iterator().next().getValue().equals(person))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        person = (Person)store4.load(entries3.iterator().next().getKey());
+        if (!entries3.iterator().next().getValue().equals(person))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        Product product = (Product)productStore.load(productEntries.iterator().next().getKey());
+        if (!productEntries.iterator().next().getValue().equals(product))
+            throw new RuntimeException("Product values were incorrectly deserialized from Cassandra");
+
+        ProductOrder order = (ProductOrder)orderStore.load(orderEntries.iterator().next().getKey());
+        if (!orderEntries.iterator().next().getValue().equals(order))
+            throw new RuntimeException("Order values were incorrectly deserialized from Cassandra");
+
+        LOGGER.info("Single read operation tests passed");
+
+        LOGGER.info("Running bulk read operation tests");
+
+        Map persons = store1.loadAll(TestsHelper.getKeys(entries1));
+        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries1, true))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        persons = store2.loadAll(TestsHelper.getKeys(entries2));
+        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries2, true))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        persons = store3.loadAll(TestsHelper.getKeys(entries3));
+        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries3, false))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        persons = store4.loadAll(TestsHelper.getKeys(entries3));
+        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries3, false))
+            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
+
+        Map products = productStore.loadAll(TestsHelper.getKeys(productEntries));
+        if (!TestsHelper.checkProductCollectionsEqual(products, productEntries))
+            throw new RuntimeException("Product values were incorrectly deserialized from Cassandra");
+
+        Map orders = orderStore.loadAll(TestsHelper.getKeys(orderEntries));
+        if (!TestsHelper.checkOrderCollectionsEqual(orders, orderEntries))
+            throw new RuntimeException("Order values were incorrectly deserialized from Cassandra");
+
+        LOGGER.info("Bulk read operation tests passed");
+
+        LOGGER.info("POJO strategy read tests passed");
+
+        LOGGER.info("Running POJO strategy delete tests");
+
+        store1.delete(entries1.iterator().next().getKey());
+        store1.deleteAll(TestsHelper.getKeys(entries1));
+
+        store2.delete(entries2.iterator().next().getKey());
+        store2.deleteAll(TestsHelper.getKeys(entries2));
+
+        store3.delete(entries3.iterator().next().getKey());
+        store3.deleteAll(TestsHelper.getKeys(entries3));
+
+        store4.delete(entries3.iterator().next().getKey());
+        store4.deleteAll(TestsHelper.getKeys(entries3));
+
+        productStore.delete(productEntries.iterator().next().getKey());
+        productStore.deleteAll(TestsHelper.getKeys(productEntries));
+
+        orderStore.delete(orderEntries.iterator().next().getKey());
+        orderStore.deleteAll(TestsHelper.getKeys(orderEntries));
+
+        LOGGER.info("POJO strategy delete tests passed");
+    }
+
+    /** */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void pojoStrategyTransactionTest() {
+        Map<Object, Object> sessionProps = U.newHashMap(1);
+        Transaction sessionTx = new TestTransaction();
+
+        CacheStore productStore = CacheStoreHelper.createCacheStore("product",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"),
+            CassandraHelper.getAdminDataSrc(), new TestCacheSession("product", sessionTx, sessionProps));
+
+        CacheStore orderStore = CacheStoreHelper.createCacheStore("order",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"),
+            CassandraHelper.getAdminDataSrc(), new TestCacheSession("order", sessionTx, sessionProps));
+
+        List<CacheEntryImpl<Long, Product>> productEntries = TestsHelper.generateProductEntries();
+        Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> ordersPerProduct =
+                TestsHelper.generateOrdersPerProductEntries(productEntries, 2);
+
+        Collection<Long> productIds =  TestsHelper.getProductIds(productEntries);
+        Collection<Long> orderIds =  TestsHelper.getOrderIds(ordersPerProduct);
+
+        LOGGER.info("Running POJO strategy transaction write tests");
+
+        LOGGER.info("Running single write operation tests");
+
+        CassandraHelper.dropTestKeyspaces();
+
+        Product product = productEntries.iterator().next().getValue();
+        ProductOrder order = ordersPerProduct.get(product.getId()).iterator().next().getValue();
+
+        productStore.write(productEntries.iterator().next());
+        orderStore.write(ordersPerProduct.get(product.getId()).iterator().next());
+
+        if (productStore.load(product.getId()) != null || orderStore.load(order.getId()) != null) {
+            throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " +
+                    "objects were already persisted into Cassandra");
+        }
+
+        Map<Long, Product> products = (Map<Long, Product>)productStore.loadAll(productIds);
+        Map<Long, ProductOrder> orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
+
+        if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
+            throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " +
+                    "objects were already persisted into Cassandra");
+        }
+
+        //noinspection deprecation
+        orderStore.sessionEnd(true);
+        //noinspection deprecation
+        productStore.sessionEnd(true);
+
+        Product product1 = (Product)productStore.load(product.getId());
+        ProductOrder order1 = (ProductOrder)orderStore.load(order.getId());
+
+        if (product1 == null || order1 == null) {
+            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
+                    "no objects were persisted into Cassandra");
+        }
+
+        if (!product.equals(product1) || !order.equals(order1)) {
+            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
+                    "objects were incorrectly persisted/loaded to/from Cassandra");
+        }
+
+        products = (Map<Long, Product>)productStore.loadAll(productIds);
+        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
+
+        if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
+            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
+                    "no objects were persisted into Cassandra");
+        }
+
+        if (products.size() > 1 || orders.size() > 1) {
+            throw new RuntimeException("Single write operation test failed. There were committed more objects " +
+                    "into Cassandra than expected");
+        }
+
+        product1 = products.entrySet().iterator().next().getValue();
+        order1 = orders.entrySet().iterator().next().getValue();
+
+        if (!product.equals(product1) || !order.equals(order1)) {
+            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
+                    "objects were incorrectly persisted/loaded to/from Cassandra");
+        }
+
+        LOGGER.info("Single write operation tests passed");
+
+        LOGGER.info("Running bulk write operation tests");
+
+        CassandraHelper.dropTestKeyspaces();
+        sessionProps.clear();
+
+        productStore.writeAll(productEntries);
+
+        for (Long productId : ordersPerProduct.keySet())
+            orderStore.writeAll(ordersPerProduct.get(productId));
+
+        for (Long productId : productIds) {
+            if (productStore.load(productId) != null) {
+                throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " +
+                        "objects were already persisted into Cassandra");
+            }
+        }
+
+        for (Long orderId : orderIds) {
+            if (orderStore.load(orderId) != null) {
+                throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " +
+                        "objects were already persisted into Cassandra");
+            }
+        }
+
+        products = (Map<Long, Product>)productStore.loadAll(productIds);
+        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
+
+        if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
+            throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " +
+                    "objects were already persisted into Cassandra");
+        }
+
+        //noinspection deprecation
+        productStore.sessionEnd(true);
+        //noinspection deprecation
+        orderStore.sessionEnd(true);
+
+        for (CacheEntryImpl<Long, Product> entry : productEntries) {
+            product = (Product)productStore.load(entry.getKey());
+
+            if (!entry.getValue().equals(product)) {
+                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
+                        "not all objects were persisted into Cassandra");
+            }
+        }
+
+        for (Long productId : ordersPerProduct.keySet()) {
+            for (CacheEntryImpl<Long, ProductOrder> entry : ordersPerProduct.get(productId)) {
+                order = (ProductOrder)orderStore.load(entry.getKey());
+
+                if (!entry.getValue().equals(order)) {
+                    throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
+                            "not all objects were persisted into Cassandra");
+                }
+            }
+        }
+
+        products = (Map<Long, Product>)productStore.loadAll(productIds);
+        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
+
+        if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
+            throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
+                    "no objects were persisted into Cassandra");
+        }
+
+        if (products.size() < productIds.size() || orders.size() < orderIds.size()) {
+            throw new RuntimeException("Bulk write operation test failed. There were committed less objects " +
+                    "into Cassandra than expected");
+        }
+
+        if (products.size() > productIds.size() || orders.size() > orderIds.size()) {
+            throw new RuntimeException("Bulk write operation test failed. There were committed more objects " +
+                    "into Cassandra than expected");
+        }
+
+        for (CacheEntryImpl<Long, Product> entry : productEntries) {
+            product = products.get(entry.getKey());
+
+            if (!entry.getValue().equals(product)) {
+                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
+                        "some objects were incorrectly persisted/loaded to/from Cassandra");
+            }
+        }
+
+        for (Long productId : ordersPerProduct.keySet()) {
+            for (CacheEntryImpl<Long, ProductOrder> entry : ordersPerProduct.get(productId)) {
+                order = orders.get(entry.getKey());
+
+                if (!entry.getValue().equals(order)) {
+                    throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
+                            "some objects were incorrectly persisted/loaded to/from Cassandra");
+                }
+            }
+        }
+
+        LOGGER.info("Bulk write operation tests passed");
+
+        LOGGER.info("POJO strategy transaction write tests passed");
+
+        LOGGER.info("Running POJO strategy transaction delete tests");
+
+        LOGGER.info("Running single delete tests");
+
+        sessionProps.clear();
+
+        Product deletedProduct = productEntries.remove(0).getValue();
+        ProductOrder deletedOrder = ordersPerProduct.get(deletedProduct.getId()).remove(0).getValue();
+
+        productStore.delete(deletedProduct.getId());
+        orderStore.delete(deletedOrder.getId());
+
+        if (productStore.load(deletedProduct.getId()) == null || orderStore.load(deletedOrder.getId()) == null) {
+            throw new RuntimeException("Single delete operation test failed. Transaction wasn't committed yet, but " +
+                    "objects were already deleted from Cassandra");
+        }
+
+        products = (Map<Long, Product>)productStore.loadAll(productIds);
+        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
+
+        if (products.size() != productIds.size() || orders.size() != orderIds.size()) {
+            throw new RuntimeException("Single delete operation test failed. Transaction wasn't committed yet, but " +
+                    "objects were already deleted from Cassandra");
+        }
+
+        //noinspection deprecation
+        productStore.sessionEnd(true);
+        //noinspection deprecation
+        orderStore.sessionEnd(true);
+
+        if (productStore.load(deletedProduct.getId()) != null || orderStore.load(deletedOrder.getId()) != null) {
+            throw new RuntimeException("Single delete operation test failed. Transaction was committed, but " +
+                    "objects were not deleted from Cassandra");
+        }
+
+        products = (Map<Long, Product>)productStore.loadAll(productIds);
+        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
+
+        if (products.get(deletedProduct.getId()) != null || orders.get(deletedOrder.getId()) != null) {
+            throw new RuntimeException("Single delete operation test failed. Transaction was committed, but " +
+                    "objects were not deleted from Cassandra");
+        }
+
+        LOGGER.info("Single delete tests passed");
+
+        LOGGER.info("Running bulk delete tests");
+
+        sessionProps.clear();
+
+        productStore.deleteAll(productIds);
+        orderStore.deleteAll(orderIds);
+
+        products = (Map<Long, Product>)productStore.loadAll(productIds);
+        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
+
+        if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
+            throw new RuntimeException("Bulk delete operation test failed. Transaction wasn't committed yet, but " +
+                    "objects were already deleted from Cassandra");
+        }
+
+        //noinspection deprecation
+        orderStore.sessionEnd(true);
+        //noinspection deprecation
+        productStore.sessionEnd(true);
+
+        products = (Map<Long, Product>)productStore.loadAll(productIds);
+        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
+
+        if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
+            throw new RuntimeException("Bulk delete operation test failed. Transaction was committed, but " +
+                    "objects were not deleted from Cassandra");
+        }
+
+        LOGGER.info("Bulk delete tests passed");
+
+        LOGGER.info("POJO strategy transaction delete tests passed");
+    }
+}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraLocalServer.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraLocalServer.java
new file mode 100644
index 0000000..eea4e9e
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraLocalServer.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests;
+
+import org.apache.ignite.tests.utils.CassandraHelper;
+import org.apache.log4j.Logger;
+
+/**
+ * Simple helper class to run Cassandra on localhost
+ */
+public class CassandraLocalServer {
+    /** Logger used to report server lifecycle events. */
+    private static final Logger LOGGER = Logger.getLogger(CassandraLocalServer.class.getName());
+
+    /**
+     * Starts an embedded Cassandra instance, verifies admin and regular connectivity,
+     * drops keyspaces left over from previous test sessions and then keeps the JVM
+     * alive (periodically reporting liveness) until the process is terminated.
+     *
+     * @param args Command line arguments (ignored).
+     */
+    public static void main(String[] args) {
+        try {
+            CassandraHelper.startEmbeddedCassandra(LOGGER);
+        }
+        catch (Throwable e) {
+            throw new RuntimeException("Failed to start embedded Cassandra instance", e);
+        }
+
+        LOGGER.info("Testing admin connection to Cassandra");
+        CassandraHelper.testAdminConnection();
+
+        LOGGER.info("Testing regular connection to Cassandra");
+        CassandraHelper.testRegularConnection();
+
+        LOGGER.info("Dropping all artifacts from previous tests execution session");
+        CassandraHelper.dropTestKeyspaces();
+
+        // Keep-alive loop: runs until the process is killed or the thread is interrupted.
+        while (true) {
+            try {
+                System.out.println("Cassandra server running");
+
+                Thread.sleep(10000);
+            }
+            catch (InterruptedException e) {
+                // Restore the interrupt flag before propagating, per InterruptedException contract.
+                Thread.currentThread().interrupt();
+
+                throw new RuntimeException("Cassandra server terminated", e);
+            }
+        }
+    }
+}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java
new file mode 100644
index 0000000..e982e16
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests;
+
+import java.net.URL;
+import org.apache.ignite.cache.store.cassandra.utils.DDLGenerator;
+import org.junit.Test;
+
+/**
+ * DDLGenerator test.
+ */
+public class DDLGeneratorTest {
+    /** Classpath locations of the persistence settings files used to generate DDL. */
+    private static final String[] RESOURCES = new String[] {
+        "org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml",
+        "org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml",
+        "org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml",
+        "org/apache/ignite/tests/persistence/pojo/product.xml",
+        "org/apache/ignite/tests/persistence/pojo/order.xml"
+    };
+
+    /**
+     * Smoke test for {@link DDLGenerator}: resolves each persistence settings
+     * resource from the classpath and runs the generator over all of them.
+     */
+    @Test
+    public void generatorTest() {
+        String[] files = new String[RESOURCES.length];
+
+        ClassLoader clsLdr = DDLGeneratorTest.class.getClassLoader();
+
+        for (int i = 0; i < RESOURCES.length; i++) {
+            URL url = clsLdr.getResource(RESOURCES[i]);
+
+            // Fail fast with a clear message if the test classpath is misconfigured.
+            if (url == null)
+                throw new IllegalStateException("Failed to find resource: " + RESOURCES[i]);
+
+            files[i] = url.getFile();
+        }
+
+        DDLGenerator.main(files);
+    }
+}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DatasourceSerializationTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DatasourceSerializationTest.java
new file mode 100644
index 0000000..ceb90e0
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DatasourceSerializationTest.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests;
+
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.ConsistencyLevel;
+import com.datastax.driver.core.Host;
+import com.datastax.driver.core.HostDistance;
+import com.datastax.driver.core.Statement;
+import com.datastax.driver.core.policies.LoadBalancingPolicy;
+import com.datastax.driver.core.policies.RoundRobinPolicy;
+import com.datastax.driver.core.policies.TokenAwarePolicy;
+
+import java.io.Serializable;
+import java.lang.reflect.Field;
+import java.net.InetAddress;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.ignite.cache.store.cassandra.datasource.Credentials;
+import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
+import org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer;
+import org.apache.ignite.tests.utils.CassandraAdminCredentials;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for datasource serialization.
+ */
+public class DatasourceSerializationTest {
+    /**
+     * Sample serializable load balancing policy, used to verify that a custom
+     * policy class survives {@link DataSource} serialization.
+     */
+    private static class MyLoadBalancingPolicy implements LoadBalancingPolicy, Serializable {
+        /** Delegate policy; transient so serialization doesn't attempt to write it. */
+        private transient LoadBalancingPolicy plc = new TokenAwarePolicy(new RoundRobinPolicy());
+
+        /** {@inheritDoc} */
+        @Override public void init(Cluster cluster, Collection<Host> hosts) {
+            plc.init(cluster, hosts);
+        }
+
+        /** {@inheritDoc} */
+        @Override public HostDistance distance(Host host) {
+            return plc.distance(host);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Iterator<Host> newQueryPlan(String loggedKeyspace, Statement statement) {
+            return plc.newQueryPlan(loggedKeyspace, statement);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onAdd(Host host) {
+            plc.onAdd(host);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onUp(Host host) {
+            plc.onUp(host);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onDown(Host host) {
+            plc.onDown(host);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onRemove(Host host) {
+            plc.onRemove(host);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() {
+            plc.close();
+        }
+    }
+
+    /**
+     * Serializes a fully configured {@link DataSource}, deserializes it back and
+     * checks (via reflection on its private fields) that credentials, contact points,
+     * consistency levels and the load balancing policy were all preserved.
+     */
+    @Test
+    @SuppressWarnings("unchecked") // Cast of reflectively read field to List<InetAddress>.
+    public void serializationTest() {
+        DataSource src = new DataSource();
+
+        Credentials cred = new CassandraAdminCredentials();
+        String[] points = new String[]{"127.0.0.1", "10.0.0.2", "10.0.0.3"};
+        LoadBalancingPolicy plc = new MyLoadBalancingPolicy();
+
+        src.setCredentials(cred);
+        src.setContactPoints(points);
+        src.setReadConsistency("ONE");
+        src.setWriteConsistency("QUORUM");
+        src.setLoadBalancingPolicy(plc);
+
+        JavaSerializer serializer = new JavaSerializer();
+
+        ByteBuffer buff = serializer.serialize(src);
+        DataSource restored = (DataSource)serializer.deserialize(buff);
+
+        Credentials restoredCred = (Credentials)getFieldValue(restored, "creds");
+        List<InetAddress> restoredPoints = (List<InetAddress>)getFieldValue(restored, "contactPoints");
+        ConsistencyLevel restoredReadCons = (ConsistencyLevel)getFieldValue(restored, "readConsistency");
+        ConsistencyLevel restoredWriteCons = (ConsistencyLevel)getFieldValue(restored, "writeConsistency");
+        LoadBalancingPolicy restoredPlc = (LoadBalancingPolicy)getFieldValue(restored, "loadBalancingPlc");
+
+        assertTrue("Incorrectly serialized/deserialized credentials for Cassandra DataSource",
+            cred.getPassword().equals(restoredCred.getPassword()) && cred.getUser().equals(restoredCred.getUser()));
+
+        assertTrue("Incorrectly serialized/deserialized contact points for Cassandra DataSource",
+            "/127.0.0.1".equals(restoredPoints.get(0).toString()) &&
+            "/10.0.0.2".equals(restoredPoints.get(1).toString()) &&
+            "/10.0.0.3".equals(restoredPoints.get(2).toString()));
+
+        assertTrue("Incorrectly serialized/deserialized consistency levels for Cassandra DataSource",
+            ConsistencyLevel.ONE == restoredReadCons && ConsistencyLevel.QUORUM == restoredWriteCons);
+
+        assertTrue("Incorrectly serialized/deserialized load balancing policy for Cassandra DataSource",
+            restoredPlc instanceof MyLoadBalancingPolicy);
+    }
+
+    /**
+     * Reads a private field value via reflection.
+     *
+     * @param obj Object.
+     * @param field Field name.
+     * @return Field value.
+     */
+    private Object getFieldValue(Object obj, String field) {
+        try {
+            Field f = obj.getClass().getDeclaredField(field);
+
+            f.setAccessible(true);
+
+            return f.get(obj);
+        }
+        catch (ReflectiveOperationException | SecurityException e) {
+            throw new RuntimeException("Failed to get field '" + field + "' value", e);
+        }
+    }
+}
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreLoadTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreLoadTest.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreLoadTest.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreLoadTest.java
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java
new file mode 100644
index 0000000..97e7230
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java
@@ -0,0 +1,666 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests;
+
+import java.util.Collection;
+import java.util.Date;
+import java.util.Map;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteTransactions;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cache.CachePeekMode;
+import org.apache.ignite.cache.store.CacheStore;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.tests.pojos.Person;
+import org.apache.ignite.tests.pojos.PersonId;
+import org.apache.ignite.tests.pojos.Product;
+import org.apache.ignite.tests.pojos.ProductOrder;
+import org.apache.ignite.tests.utils.CacheStoreHelper;
+import org.apache.ignite.tests.utils.CassandraHelper;
+import org.apache.ignite.tests.utils.TestsHelper;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+import org.apache.log4j.Logger;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.springframework.core.io.ClassPathResource;
+
+/**
+ * Unit tests for Ignite caches which utilizing {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore}
+ * to store cache data into Cassandra tables
+ */
+public class IgnitePersistentStoreTest {
+    /** */
+    private static final Logger LOGGER = Logger.getLogger(IgnitePersistentStoreTest.class.getName());
+
+    /**
+     * Prepares the test environment: optionally starts an embedded Cassandra
+     * instance, verifies both admin and regular connections and drops keyspaces
+     * left over from previous test runs.
+     */
+    @BeforeClass
+    public static void setUpClass() {
+        // Start embedded Cassandra only when tests are configured to use it.
+        if (CassandraHelper.useEmbeddedCassandra()) {
+            try {
+                CassandraHelper.startEmbeddedCassandra(LOGGER);
+            }
+            catch (Throwable e) {
+                throw new RuntimeException("Failed to start embedded Cassandra instance", e);
+            }
+        }
+
+        LOGGER.info("Testing admin connection to Cassandra");
+        CassandraHelper.testAdminConnection();
+
+        LOGGER.info("Testing regular connection to Cassandra");
+        CassandraHelper.testRegularConnection();
+
+        LOGGER.info("Dropping all artifacts from previous tests execution session");
+        CassandraHelper.dropTestKeyspaces();
+
+        LOGGER.info("Start tests execution");
+    }
+
+    /**
+     * Cleans up after all tests: drops the test keyspaces, releases Cassandra
+     * driver resources and stops the embedded Cassandra instance if one was started.
+     */
+    @AfterClass
+    public static void tearDownClass() {
+        try {
+            CassandraHelper.dropTestKeyspaces();
+        }
+        finally {
+            // Release resources even if keyspace cleanup failed.
+            CassandraHelper.releaseCassandraResources();
+
+            if (CassandraHelper.useEmbeddedCassandra()) {
+                try {
+                    CassandraHelper.stopEmbeddedCassandra();
+                }
+                catch (Throwable e) {
+                    // Log only: a shutdown failure shouldn't mask a test failure.
+                    LOGGER.error("Failed to stop embedded Cassandra instance", e);
+                }
+            }
+        }
+    }
+
+    /** */
+    @Test
+    public void primitiveStrategyTest() {
+        Ignition.stopAll(true);
+
+        Map<Long, Long> longMap = TestsHelper.generateLongsMap();
+        Map<String, String> strMap = TestsHelper.generateStringsMap();
+
+        LOGGER.info("Running PRIMITIVE strategy write tests");
+
+        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/primitive/ignite-config.xml")) {
+            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
+            IgniteCache<String, String> strCache = ignite.getOrCreateCache(new CacheConfiguration<String, String>("cache2"));
+
+            LOGGER.info("Running single operation write tests");
+            longCache.put(1L, 1L);
+            strCache.put("1", "1");
+            LOGGER.info("Single operation write tests passed");
+
+            LOGGER.info("Running bulk operation write tests");
+            longCache.putAll(longMap);
+            strCache.putAll(strMap);
+            LOGGER.info("Bulk operation write tests passed");
+        }
+
+        LOGGER.info("PRIMITIVE strategy write tests passed");
+
+        Ignition.stopAll(true);
+
+        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/primitive/ignite-config.xml")) {
+            LOGGER.info("Running PRIMITIVE strategy read tests");
+
+            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
+            IgniteCache<String, String> strCache = ignite.getOrCreateCache(new CacheConfiguration<String, String>("cache2"));
+
+            LOGGER.info("Running single operation read tests");
+
+            Long longVal = longCache.get(1L);
+            if (!longVal.equals(longMap.get(1L)))
+                throw new RuntimeException("Long value was incorrectly deserialized from Cassandra");
+
+            String strVal = strCache.get("1");
+            if (!strVal.equals(strMap.get("1")))
+                throw new RuntimeException("String value was incorrectly deserialized from Cassandra");
+
+            LOGGER.info("Single operation read tests passed");
+
+            LOGGER.info("Running bulk operation read tests");
+
+            Map<Long, Long> longMap1 = longCache.getAll(longMap.keySet());
+            if (!TestsHelper.checkMapsEqual(longMap, longMap1))
+                throw new RuntimeException("Long values batch was incorrectly deserialized from Cassandra");
+
+            Map<String, String> strMap1 = strCache.getAll(strMap.keySet());
+            if (!TestsHelper.checkMapsEqual(strMap, strMap1))
+                throw new RuntimeException("String values batch was incorrectly deserialized from Cassandra");
+
+            LOGGER.info("Bulk operation read tests passed");
+
+            LOGGER.info("PRIMITIVE strategy read tests passed");
+
+            LOGGER.info("Running PRIMITIVE strategy delete tests");
+
+            longCache.remove(1L);
+            longCache.removeAll(longMap.keySet());
+
+            strCache.remove("1");
+            strCache.removeAll(strMap.keySet());
+
+            LOGGER.info("PRIMITIVE strategy delete tests passed");
+        }
+    }
+
+    /**
+     * Checks the BLOB persistence strategy: stores longs and {@link Person} objects,
+     * restarts the node, reads the values back (served through the Cassandra-backed
+     * store) and finally deletes them.
+     */
+    @Test
+    public void blobStrategyTest() {
+        Ignition.stopAll(true);
+
+        Map<Long, Long> longs = TestsHelper.generateLongsMap();
+        Map<Long, Person> persons = TestsHelper.generateLongsPersonsMap();
+
+        LOGGER.info("Running BLOB strategy write tests");
+
+        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/blob/ignite-config.xml")) {
+            IgniteCache<Long, Long> lCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
+            IgniteCache<Long, Person> pCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache2"));
+
+            LOGGER.info("Running single operation write tests");
+            lCache.put(1L, 1L);
+            pCache.put(1L, TestsHelper.generateRandomPerson(1L));
+            LOGGER.info("Single operation write tests passed");
+
+            LOGGER.info("Running bulk operation write tests");
+            lCache.putAll(longs);
+            pCache.putAll(persons);
+            LOGGER.info("Bulk operation write tests passed");
+        }
+
+        LOGGER.info("BLOB strategy write tests passed");
+
+        // Restart the node so the reads below must go through the underlying store.
+        Ignition.stopAll(true);
+
+        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/blob/ignite-config.xml")) {
+            LOGGER.info("Running BLOB strategy read tests");
+
+            IgniteCache<Long, Long> lCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
+            IgniteCache<Long, Person> pCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache2"));
+
+            LOGGER.info("Running single operation read tests");
+
+            Long lVal = lCache.get(1L);
+            if (!lVal.equals(longs.get(1L)))
+                throw new RuntimeException("Long value was incorrectly deserialized from Cassandra");
+
+            Person p = pCache.get(1L);
+            if (!p.equals(persons.get(1L)))
+                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
+
+            LOGGER.info("Single operation read tests passed");
+
+            LOGGER.info("Running bulk operation read tests");
+
+            Map<Long, Long> loadedLongs = lCache.getAll(longs.keySet());
+            if (!TestsHelper.checkMapsEqual(longs, loadedLongs))
+                throw new RuntimeException("Long values batch was incorrectly deserialized from Cassandra");
+
+            Map<Long, Person> loadedPersons = pCache.getAll(persons.keySet());
+            if (!TestsHelper.checkPersonMapsEqual(persons, loadedPersons, false))
+                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
+
+            LOGGER.info("Bulk operation read tests passed");
+
+            LOGGER.info("BLOB strategy read tests passed");
+
+            LOGGER.info("Running BLOB strategy delete tests");
+
+            lCache.remove(1L);
+            lCache.removeAll(longs.keySet());
+
+            pCache.remove(1L);
+            pCache.removeAll(persons.keySet());
+
+            LOGGER.info("BLOB strategy delete tests passed");
+        }
+    }
+
+    /** */
+    @Test
+    public void pojoStrategyTest() {
+        Ignition.stopAll(true);
+
+        LOGGER.info("Running POJO strategy write tests");
+
+        Map<Long, Person> personMap1 = TestsHelper.generateLongsPersonsMap();
+        Map<PersonId, Person> personMap2 = TestsHelper.generatePersonIdsPersonsMap();
+        Map<Long, Product> productsMap = TestsHelper.generateProductsMap();
+        Map<Long, ProductOrder> ordersMap = TestsHelper.generateOrdersMap();
+
+        Product product = TestsHelper.generateRandomProduct(-1L);
+        ProductOrder order = TestsHelper.generateRandomOrder(-1L);
+
+        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
+            IgniteCache<Long, Person> personCache1 = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache1"));
+            IgniteCache<PersonId, Person> personCache2 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache2"));
+            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));
+            IgniteCache<PersonId, Person> personCache4 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache4"));
+            IgniteCache<Long, Product> productCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Product>("product"));
+            IgniteCache<Long, ProductOrder> orderCache = ignite.getOrCreateCache(new CacheConfiguration<Long, ProductOrder>("order"));
+
+            LOGGER.info("Running single operation write tests");
+
+            personCache1.put(1L, TestsHelper.generateRandomPerson(1L));
+
+            PersonId id = TestsHelper.generateRandomPersonId();
+            personCache2.put(id, TestsHelper.generateRandomPerson(id.getPersonNumber()));
+
+            id = TestsHelper.generateRandomPersonId();
+            personCache3.put(id, TestsHelper.generateRandomPerson(id.getPersonNumber()));
+            personCache4.put(id, TestsHelper.generateRandomPerson(id.getPersonNumber()));
+
+            productCache.put(product.getId(), product);
+            orderCache.put(order.getId(), order);
+
+            LOGGER.info("Single operation write tests passed");
+
+            LOGGER.info("Running bulk operation write tests");
+            personCache1.putAll(personMap1);
+            personCache2.putAll(personMap2);
+            personCache3.putAll(personMap2);
+            personCache4.putAll(personMap2);
+            productCache.putAll(productsMap);
+            orderCache.putAll(ordersMap);
+            LOGGER.info("Bulk operation write tests passed");
+        }
+
+        LOGGER.info("POJO strategy write tests passed");
+
+        Ignition.stopAll(true);
+
+        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
+            LOGGER.info("Running POJO strategy read tests");
+
+            IgniteCache<Long, Person> personCache1 = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache1"));
+            IgniteCache<PersonId, Person> personCache2 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache2"));
+            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));
+            IgniteCache<PersonId, Person> personCache4 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache4"));
+            IgniteCache<Long, Product> productCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Product>("product"));
+            IgniteCache<Long, ProductOrder> orderCache = ignite.getOrCreateCache(new CacheConfiguration<Long, ProductOrder>("order"));
+
+            LOGGER.info("Running single operation read tests");
+            Person person = personCache1.get(1L);
+            if (!person.equalsPrimitiveFields(personMap1.get(1L)))
+                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
+
+            PersonId id = personMap2.keySet().iterator().next();
+
+            person = personCache2.get(id);
+            if (!person.equalsPrimitiveFields(personMap2.get(id)))
+                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
+
+            person = personCache3.get(id);
+            if (!person.equals(personMap2.get(id)))
+                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
+
+            person = personCache4.get(id);
+            if (!person.equals(personMap2.get(id)))
+                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
+
+            Product product1 = productCache.get(product.getId());
+            if (!product.equals(product1))
+                throw new RuntimeException("Product value was incorrectly deserialized from Cassandra");
+
+            ProductOrder order1 = orderCache.get(order.getId());
+            if (!order.equals(order1))
+                throw new RuntimeException("Order value was incorrectly deserialized from Cassandra");
+
+            LOGGER.info("Single operation read tests passed");
+
+            LOGGER.info("Running bulk operation read tests");
+
+            Map<Long, Person> persons1 = personCache1.getAll(personMap1.keySet());
+            if (!TestsHelper.checkPersonMapsEqual(persons1, personMap1, true))
+                throw new RuntimeException("Persons values batch was incorrectly deserialized from Cassandra");
+
+            Map<PersonId, Person> persons2 = personCache2.getAll(personMap2.keySet());
+            if (!TestsHelper.checkPersonMapsEqual(persons2, personMap2, true))
+                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
+
+            Map<PersonId, Person> persons3 = personCache3.getAll(personMap2.keySet());
+            if (!TestsHelper.checkPersonMapsEqual(persons3, personMap2, false))
+                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
+
+            Map<PersonId, Person> persons4 = personCache4.getAll(personMap2.keySet());
+            if (!TestsHelper.checkPersonMapsEqual(persons4, personMap2, false))
+                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
+
+            Map<Long, Product> productsMap1 = productCache.getAll(productsMap.keySet());
+            if (!TestsHelper.checkProductMapsEqual(productsMap, productsMap1))
+                throw new RuntimeException("Product values batch was incorrectly deserialized from Cassandra");
+
+            Map<Long, ProductOrder> ordersMap1 = orderCache.getAll(ordersMap.keySet());
+            if (!TestsHelper.checkOrderMapsEqual(ordersMap, ordersMap1))
+                throw new RuntimeException("Order values batch was incorrectly deserialized from Cassandra");
+
+            LOGGER.info("Bulk operation read tests passed");
+
+            LOGGER.info("POJO strategy read tests passed");
+
+            LOGGER.info("Running POJO strategy delete tests");
+
+            personCache1.remove(1L);
+            personCache1.removeAll(personMap1.keySet());
+
+            personCache2.remove(id);
+            personCache2.removeAll(personMap2.keySet());
+
+            personCache3.remove(id);
+            personCache3.removeAll(personMap2.keySet());
+
+            personCache4.remove(id);
+            personCache4.removeAll(personMap2.keySet());
+
+            productCache.remove(product.getId());
+            productCache.removeAll(productsMap.keySet());
+
+            orderCache.remove(order.getId());
+            orderCache.removeAll(ordersMap.keySet());
+
+            LOGGER.info("POJO strategy delete tests passed");
+        }
+    }
+
+    /** */
+    @Test
+    public void pojoStrategyTransactionTest() {
+        CassandraHelper.dropTestKeyspaces();
+
+        Ignition.stopAll(true);
+
+        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
+            pojoStrategyTransactionTest(ignite, TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED);
+            pojoStrategyTransactionTest(ignite, TransactionConcurrency.OPTIMISTIC, TransactionIsolation.REPEATABLE_READ);
+            pojoStrategyTransactionTest(ignite, TransactionConcurrency.OPTIMISTIC, TransactionIsolation.SERIALIZABLE);
+            pojoStrategyTransactionTest(ignite, TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED);
+            pojoStrategyTransactionTest(ignite, TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ);
+            pojoStrategyTransactionTest(ignite, TransactionConcurrency.PESSIMISTIC, TransactionIsolation.SERIALIZABLE);
+        }
+    }
+
+    /** */
+    @Test
+    public void loadCacheTest() {
+        Ignition.stopAll(true);
+
+        LOGGER.info("Running loadCache test");
+
+        LOGGER.info("Filling Cassandra table with test data");
+
+        CacheStore store = CacheStoreHelper.createCacheStore("personTypes",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        Collection<CacheEntryImpl<PersonId, Person>> entries = TestsHelper.generatePersonIdsPersonsEntries();
+
+        //noinspection unchecked
+        store.writeAll(entries);
+
+        LOGGER.info("Cassandra table filled with test data");
+
+        LOGGER.info("Running loadCache test");
+
+        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
+            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));
+            int size = personCache3.size(CachePeekMode.ALL);
+
+            LOGGER.info("Initial cache size " + size);
+
+            LOGGER.info("Loading cache data from Cassandra table");
+
+            personCache3.loadCache(null, new String[] {"select * from test1.pojo_test3 limit 3"});
+
+            size = personCache3.size(CachePeekMode.ALL);
+            if (size != 3) {
+                throw new RuntimeException("Cache data was incorrectly loaded from Cassandra. " +
+                    "Expected number of records is 3, but loaded number of records is " + size);
+            }
+
+            personCache3.clear();
+
+            personCache3.loadCache(null);
+
+            size = personCache3.size(CachePeekMode.ALL);
+            if (size != TestsHelper.getBulkOperationSize()) {
+                throw new RuntimeException("Cache data was incorrectly loaded from Cassandra. " +
+                    "Expected number of records is " + TestsHelper.getBulkOperationSize() +
+                    ", but loaded number of records is " + size);
+            }
+
+            LOGGER.info("Cache data loaded from Cassandra table");
+        }
+
+        LOGGER.info("loadCache test passed");
+    }
+
+    /** */
+    @SuppressWarnings("unchecked")
+    private void pojoStrategyTransactionTest(Ignite ignite, TransactionConcurrency concurrency,
+                                             TransactionIsolation isolation) {
+        LOGGER.info("-----------------------------------------------------------------------------------");
+        LOGGER.info("Running POJO transaction tests using " + concurrency +
+                " concurrency and " + isolation + " isolation level");
+        LOGGER.info("-----------------------------------------------------------------------------------");
+
+        CacheStore productStore = CacheStoreHelper.createCacheStore("product",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        CacheStore orderStore = CacheStoreHelper.createCacheStore("order",
+            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"),
+            CassandraHelper.getAdminDataSrc());
+
+        Map<Long, Product> productsMap = TestsHelper.generateProductsMap(5);
+        Map<Long, Product> productsMap1;
+        Map<Long, ProductOrder> ordersMap = TestsHelper.generateOrdersMap(5);
+        Map<Long, ProductOrder> ordersMap1;
+        Product product = TestsHelper.generateRandomProduct(-1L);
+        ProductOrder order = TestsHelper.generateRandomOrder(-1L, -1L, new Date());
+
+        IgniteTransactions txs = ignite.transactions();
+
+        IgniteCache<Long, Product> productCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Product>("product"));
+        IgniteCache<Long, ProductOrder> orderCache = ignite.getOrCreateCache(new CacheConfiguration<Long, ProductOrder>("order"));
+
+        LOGGER.info("Running POJO strategy write tests");
+
+        LOGGER.info("Running single operation write tests");
+
+        Transaction tx = txs.txStart(concurrency, isolation);
+
+        try {
+            productCache.put(product.getId(), product);
+            orderCache.put(order.getId(), order);
+
+            if (productStore.load(product.getId()) != null || orderStore.load(order.getId()) != null) {
+                throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " +
+                        "objects were already persisted into Cassandra");
+            }
+
+            Map<Long, Product> products = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
+            Map<Long, ProductOrder> orders = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
+
+            if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
+                throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " +
+                        "objects were already persisted into Cassandra");
+            }
+
+            tx.commit();
+        }
+        finally {
+            U.closeQuiet(tx);
+        }
+
+        Product product1 = (Product)productStore.load(product.getId());
+        ProductOrder order1 = (ProductOrder)orderStore.load(order.getId());
+
+        if (product1 == null || order1 == null) {
+            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
+                    "no objects were persisted into Cassandra");
+        }
+
+        if (!product.equals(product1) || !order.equals(order1)) {
+            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
+                    "objects were incorrectly persisted/loaded to/from Cassandra");
+        }
+
+        LOGGER.info("Single operation write tests passed");
+
+        LOGGER.info("Running bulk operation write tests");
+
+        tx = txs.txStart(concurrency, isolation);
+
+        try {
+            productCache.putAll(productsMap);
+            orderCache.putAll(ordersMap);
+
+            productsMap1 = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
+            ordersMap1 = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
+
+            if ((productsMap1 != null && !productsMap1.isEmpty()) || (ordersMap1 != null && !ordersMap1.isEmpty())) {
+                throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " +
+                        "objects were already persisted into Cassandra");
+            }
+
+            tx.commit();
+        }
+        finally {
+            U.closeQuiet(tx);
+        }
+
+        productsMap1 = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
+        ordersMap1 = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
+
+        if (productsMap1 == null || productsMap1.isEmpty() || ordersMap1 == null || ordersMap1.isEmpty()) {
+            throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
+                    "no objects were persisted into Cassandra");
+        }
+
+        if (productsMap1.size() < productsMap.size() || ordersMap1.size() < ordersMap.size()) {
+            throw new RuntimeException("Bulk write operation test failed. There were committed less objects " +
+                    "into Cassandra than expected");
+        }
+
+        if (productsMap1.size() > productsMap.size() || ordersMap1.size() > ordersMap.size()) {
+            throw new RuntimeException("Bulk write operation test failed. There were committed more objects " +
+                    "into Cassandra than expected");
+        }
+
+        for (Map.Entry<Long, Product> entry : productsMap.entrySet()) {
+            product = productsMap1.get(entry.getKey());
+
+            if (!entry.getValue().equals(product)) {
+                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
+                        "some objects were incorrectly persisted/loaded to/from Cassandra");
+            }
+        }
+
+        for (Map.Entry<Long, ProductOrder> entry : ordersMap.entrySet()) {
+            order = ordersMap1.get(entry.getKey());
+
+            if (!entry.getValue().equals(order)) {
+                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
+                        "some objects were incorrectly persisted/loaded to/from Cassandra");
+            }
+        }
+
+        LOGGER.info("Bulk operation write tests passed");
+
+        LOGGER.info("POJO strategy write tests passed");
+
+        LOGGER.info("Running POJO strategy delete tests");
+
+        LOGGER.info("Running single delete tests");
+
+        tx = txs.txStart(concurrency, isolation);
+
+        try {
+            productCache.remove(-1L);
+            orderCache.remove(-1L);
+
+            if (productStore.load(-1L) == null || orderStore.load(-1L) == null) {
+                throw new RuntimeException("Single delete operation test failed. Transaction wasn't committed yet, but " +
+                        "objects were already deleted from Cassandra");
+            }
+
+            tx.commit();
+        }
+        finally {
+            U.closeQuiet(tx);
+        }
+
+        if (productStore.load(-1L) != null || orderStore.load(-1L) != null) {
+            throw new RuntimeException("Single delete operation test failed. Transaction was committed, but " +
+                    "objects were not deleted from Cassandra");
+        }
+
+        LOGGER.info("Single delete tests passed");
+
+        LOGGER.info("Running bulk delete tests");
+
+        tx = txs.txStart(concurrency, isolation);
+
+        try {
+            productCache.removeAll(productsMap.keySet());
+            orderCache.removeAll(ordersMap.keySet());
+
+            productsMap1 = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
+            ordersMap1 = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
+
+            if (productsMap1.size() != productsMap.size() || ordersMap1.size() != ordersMap.size()) {
+                throw new RuntimeException("Bulk delete operation test failed. Transaction wasn't committed yet, but " +
+                        "objects were already deleted from Cassandra");
+            }
+
+            tx.commit();
+        }
+        finally {
+            U.closeQuiet(tx);
+        }
+
+        productsMap1 = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
+        ordersMap1 = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
+
+        if ((productsMap1 != null && !productsMap1.isEmpty()) || (ordersMap1 != null && !ordersMap1.isEmpty())) {
+            throw new RuntimeException("Bulk delete operation test failed. Transaction was committed, but " +
+                    "objects were not deleted from Cassandra");
+        }
+
+        LOGGER.info("Bulk delete tests passed");
+
+        LOGGER.info("POJO strategy delete tests passed");
+
+        LOGGER.info("-----------------------------------------------------------------------------------");
+        LOGGER.info("Passed POJO transaction tests for " + concurrency +
+                " concurrency and " + isolation + " isolation level");
+        LOGGER.info("-----------------------------------------------------------------------------------");
+    }
+}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/LoadTestsCassandraArtifactsCreator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/LoadTestsCassandraArtifactsCreator.java
new file mode 100644
index 0000000..33b11e1
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/LoadTestsCassandraArtifactsCreator.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests;
+
+import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
+import org.apache.ignite.tests.utils.CassandraHelper;
+import org.apache.ignite.tests.utils.TestsHelper;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Recreates all required Cassandra database objects (keyspace, table, indexes) for load tests.
+ */
+public class LoadTestsCassandraArtifactsCreator {
+    /**
+     * Recreates the Cassandra artifacts (keyspace, table, indexes) required for load tests.
+     * @param args command-line arguments (ignored)
+     */
+    public static void main(String[] args) {
+        try {
+            System.out.println("[INFO] Recreating Cassandra artifacts (keyspace, table, indexes) for load tests");
+
+            KeyValuePersistenceSettings perSettings =
+                    new KeyValuePersistenceSettings(TestsHelper.getLoadTestsPersistenceSettings());
+
+            System.out.println("[INFO] Dropping test keyspace: " + perSettings.getKeyspace());
+
+            try {
+                CassandraHelper.dropTestKeyspaces();
+            } catch (Throwable e) {
+                throw new RuntimeException("Failed to drop test keyspace: " + perSettings.getKeyspace(), e);
+            }
+
+            System.out.println("[INFO] Test keyspace '" + perSettings.getKeyspace() + "' was successfully dropped");
+
+            System.out.println("[INFO] Creating test keyspace: " + perSettings.getKeyspace());
+
+            try {
+                CassandraHelper.executeWithAdminCredentials(perSettings.getKeyspaceDDLStatement());
+            } catch (Throwable e) {
+                throw new RuntimeException("Failed to create test keyspace: " + perSettings.getKeyspace(), e);
+            }
+
+            System.out.println("[INFO] Test keyspace '" + perSettings.getKeyspace() + "' was successfully created");
+
+            System.out.println("[INFO] Creating test table: " + perSettings.getTable());
+
+            try {
+                CassandraHelper.executeWithAdminCredentials(perSettings.getTableDDLStatement(perSettings.getTable()));
+            } catch (Throwable e) {
+                throw new RuntimeException("Failed to create test table: " + perSettings.getTable(), e);
+            }
+
+            System.out.println("[INFO] Test table '" + perSettings.getTable() + "' was successfully created");
+
+            List<String> statements = perSettings.getIndexDDLStatements(perSettings.getTable());
+            if (statements == null)
+                statements = new LinkedList<>();
+
+            for (String statement : statements) {
+                System.out.println("[INFO] Creating test table index:");
+                System.out.println(statement);
+
+                try {
+                    CassandraHelper.executeWithAdminCredentials(statement);
+                } catch (Throwable e) {
+                    throw new RuntimeException("Failed to create test table index", e);
+                }
+
+                System.out.println("[INFO] Test table index was successfully created");
+            }
+
+            System.out.println("[INFO] All required Cassandra artifacts were successfully recreated");
+        }
+        catch (Throwable e) {
+            System.out.println("[ERROR] Failed to recreate Cassandra artifacts");
+            e.printStackTrace(System.out);
+
+            if (e instanceof RuntimeException)
+                throw (RuntimeException)e;
+            else
+                throw new RuntimeException(e);
+        }
+        finally {
+            CassandraHelper.releaseCassandraResources();
+        }
+    }
+}
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/Generator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Generator.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/Generator.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Generator.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/IntGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
similarity index 97%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
index a31abee..21490f6 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
@@ -26,7 +26,7 @@
         long val = i / 10000;
 
         while (val > Integer.MAX_VALUE)
-            val = val / 2;
+            val /= 2;
 
         return (int)val;
     }
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java
similarity index 96%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java
index 296839d..2582007 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java
@@ -74,7 +74,7 @@
         }
 
         // calculates host unique prefix based on its subnet IP address
-        long hostUniqePrefix = getHostUniquePrefix();
+        long hostUniquePrefix = getHostUniquePrefix();
 
         logger().info("Load tests driver setup successfully completed");
 
@@ -87,8 +87,8 @@
 
             for (int i = 0; i < TestsHelper.getLoadTestsThreadsCount(); i++) {
                 Worker worker = createWorker(clazz, cfg,
-                    hostUniqePrefix + startPosition,
-                    hostUniqePrefix + startPosition + 100000000);
+                    hostUniquePrefix + startPosition,
+                    hostUniquePrefix + startPosition + 100000000);
                 workers.add(worker);
                 worker.setName(testName + "-worker-" + i);
                 worker.start();
@@ -224,14 +224,14 @@
         long part4 = Long.parseLong(parts[3]);
 
         if (part3 < 10)
-            part3 = part3 * 100;
+            part3 *= 100;
         else if (part4 < 100)
-            part3 = part3 * 10;
+            part3 *= 10;
 
         if (part4 < 10)
-            part4 = part4 * 100;
+            part4 *= 100;
         else if (part4 < 100)
-            part4 = part4 * 10;
+            part4 *= 10;
 
         return (part4 * 100000000000000L) + (part3 * 100000000000L) + Thread.currentThread().getId();
     }
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/LongGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LongGenerator.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/LongGenerator.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LongGenerator.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java
similarity index 92%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java
index 0317320..01c5c77 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java
@@ -38,6 +38,6 @@
 
     /** {@inheritDoc} */
     @Override public Object generate(long i) {
-        return new Person(Long.toString(i), Long.toString(i), (int)(i % 100), i % 2 == 0, i, i, DATE, PHONES);
+        return new Person(i, Long.toString(i), Long.toString(i), (int)(i % 100), i % 2 == 0, i, i, DATE, PHONES);
     }
 }
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/PersonIdGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonIdGenerator.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/PersonIdGenerator.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonIdGenerator.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/StringGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/StringGenerator.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/StringGenerator.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/StringGenerator.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/Worker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Worker.java
similarity index 96%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/Worker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Worker.java
index f4bffc7..5f3c393 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/Worker.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Worker.java
@@ -46,31 +46,31 @@
     boolean warmup = TestsHelper.getLoadTestsWarmupPeriod() != 0;
 
     /** */
-    private volatile long warmupStartTime = 0;
+    private volatile long warmupStartTime;
 
     /** */
-    private volatile long warmupFinishTime = 0;
+    private volatile long warmupFinishTime;
 
     /** */
-    private volatile long startTime = 0;
+    private volatile long startTime;
 
     /** */
-    private volatile long finishTime = 0;
+    private volatile long finishTime;
 
     /** */
-    private volatile long warmupMsgProcessed = 0;
+    private volatile long warmupMsgProcessed;
 
     /** */
-    private volatile long warmupSleepCnt = 0;
+    private volatile long warmupSleepCnt;
 
     /** */
-    private volatile long msgProcessed = 0;
+    private volatile long msgProcessed;
 
     /** */
-    private volatile long msgFailed = 0;
+    private volatile long msgFailed;
 
     /** */
-    private volatile long sleepCnt = 0;
+    private volatile long sleepCnt;
 
     /** */
     private Throwable executionError;
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/BulkReadWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkReadWorker.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/BulkReadWorker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkReadWorker.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/BulkWriteWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkWriteWorker.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/BulkWriteWorker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkWriteWorker.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/ReadWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/ReadWorker.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/ReadWorker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/ReadWorker.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/WriteWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/WriteWorker.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/WriteWorker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/WriteWorker.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/package-info.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/cassandra/package-info.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/package-info.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/BulkReadWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkReadWorker.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/BulkReadWorker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkReadWorker.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/BulkWriteWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkWriteWorker.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/BulkWriteWorker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkWriteWorker.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/ReadWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/ReadWorker.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/ReadWorker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/ReadWorker.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/WriteWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/WriteWorker.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/WriteWorker.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/WriteWorker.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/package-info.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/ignite/package-info.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/package-info.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/package-info.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/load/package-info.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/package-info.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/package-info.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/package-info.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/package-info.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/pojos/Person.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Person.java
similarity index 88%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/pojos/Person.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Person.java
index 8a1e623..16b64bd 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/pojos/Person.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Person.java
@@ -17,6 +17,8 @@
 
 package org.apache.ignite.tests.pojos;
 
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
@@ -29,6 +31,9 @@
  */
 public class Person implements Externalizable {
     /** */
+    private long personNum;
+
+    /** */
     private String firstName;
 
     /** */
@@ -58,8 +63,9 @@
     }
 
     /** */
-    public Person(String firstName, String lastName, int age, boolean married,
+    public Person(long personNum, String firstName, String lastName, int age, boolean married,
         long height, float weight, Date birthDate, List<String> phones) {
+        this.personNum = personNum;
         this.firstName = firstName;
         this.lastName = lastName;
         this.age = age;
@@ -73,6 +79,7 @@
 
     /** {@inheritDoc} */
     @Override public void writeExternal(ObjectOutput out) throws IOException {
+        out.writeLong(personNum);
         out.writeObject(firstName);
         out.writeObject(lastName);
         out.writeInt(age);
@@ -86,6 +93,7 @@
     /** {@inheritDoc} */
     @SuppressWarnings("unchecked")
     @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        personNum = in.readLong();
         firstName = (String)in.readObject();
         lastName = (String)in.readObject();
         age = in.readInt();
@@ -104,6 +112,9 @@
 
         Person person = (Person)obj;
 
+        if (personNum != person.personNum)
+            return false;
+
         if ((firstName != null && !firstName.equals(person.firstName)) ||
             (person.firstName != null && !person.firstName.equals(firstName)))
             return false;
@@ -132,6 +143,9 @@
 
         Person person = (Person)obj;
 
+        if (personNum != person.personNum)
+            return false;
+
         if ((firstName != null && !firstName.equals(person.firstName)) ||
             (person.firstName != null && !person.firstName.equals(firstName)))
             return false;
@@ -150,6 +164,18 @@
 
     /** */
     @SuppressWarnings("UnusedDeclaration")
+    public void setPersonNumber(long personNum) {
+        this.personNum = personNum;
+    }
+
+    /** */
+    @SuppressWarnings("UnusedDeclaration")
+    public long getPersonNumber() {
+        return personNum;
+    }
+
+    /** */
+    @SuppressWarnings("UnusedDeclaration")
     public void setFirstName(String name) {
         firstName = name;
     }
@@ -174,6 +200,13 @@
 
     /** */
     @SuppressWarnings("UnusedDeclaration")
+    @QuerySqlField
+    public String getFullName() {
+        return firstName + " " + lastName;
+    }
+
+    /** */
+    @SuppressWarnings("UnusedDeclaration")
     public void setAge(int age) {
         this.age = age;
     }
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/pojos/PersonId.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/PersonId.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/pojos/PersonId.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/PersonId.java
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Product.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Product.java
new file mode 100644
index 0000000..f8eadf4
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Product.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests.pojos;
+
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+
+/**
+ * Simple POJO to store information about a product
+ */
+public class Product {
+    /** */
+    private long id;
+
+    /** */
+    private String type;
+
+    /** */
+    private String title;
+
+    /** */
+    private String description;
+
+    /** */
+    private float price;
+
+    /** */
+    public Product() {
+    }
+
+    /** */
+    public Product(long id, String type, String title, String description, float price) {
+        this.id = id;
+        this.type = type;
+        this.title = title;
+        this.description = description;
+        this.price = price;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return ((Long)id).hashCode();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object obj) {
+        return obj instanceof Product && id == ((Product) obj).id;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return id + ", " + price + ", " + type + ", " + title + ", " + description;
+    }
+
+    /** */
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    /** */
+    @QuerySqlField(index = true)
+    public long getId() {
+        return id;
+    }
+
+    /** */
+    public void setType(String type) {
+        this.type = type;
+    }
+
+    /** */
+    @QuerySqlField
+    public String getType() {
+        return type;
+    }
+
+    /** */
+    public void setTitle(String title) {
+        this.title = title;
+    }
+
+    /** */
+    @QuerySqlField(index = true)
+    public String getTitle() {
+        return title;
+    }
+
+    /** */
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
+    /** */
+    @QuerySqlField
+    public String getDescription() {
+        return description;
+    }
+
+    /** */
+    public void setPrice(float price) {
+        this.price = price;
+    }
+
+    /** */
+    @QuerySqlField
+    public float getPrice() {
+        return price;
+    }
+}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/ProductOrder.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/ProductOrder.java
new file mode 100644
index 0000000..bafc8f3
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/ProductOrder.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests.pojos;
+
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+/**
+ * Simple POJO to store information about a product order
+ */
+public class ProductOrder {
+    /** */
+    private static final DateFormat FORMAT = new SimpleDateFormat("MM/dd/yyyy/S");
+
+    /** */
+    private static final DateFormat FULL_FORMAT = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss:S");
+
+    /** */
+    private long id;
+
+    /** */
+    private long productId;
+
+    /** */
+    private Date date;
+
+    /** */
+    private int amount;
+
+    /** */
+    private float price;
+
+    /** */
+    public ProductOrder() {
+    }
+
+    /** */
+    public ProductOrder(long id, Product product, Date date, int amount) {
+        this(id, product.getId(), product.getPrice(), date, amount);
+    }
+
+    /** */
+    public ProductOrder(long id, long productId, float productPrice, Date date, int amount) {
+        this.id = id;
+        this.productId = productId;
+        this.date = date;
+        this.amount = amount;
+        this.price = productPrice * amount;
+
+        // if user ordered more than 10 items provide 5% discount
+        if (amount > 10)
+            price *= 0.95F;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return ((Long)id).hashCode();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object obj) {
+        return obj instanceof ProductOrder && id == ((ProductOrder) obj).id;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return id + ", " + productId + ", " + FULL_FORMAT.format(date) + ", " + getDayMillisecond() + ", " + amount + ", " + price;
+    }
+
+    /** */
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    /** */
+    @QuerySqlField(index = true)
+    public long getId() {
+        return id;
+    }
+
+    /** */
+    public void setProductId(long productId) {
+        this.productId = productId;
+    }
+
+    /** */
+    @QuerySqlField(index = true)
+    public long getProductId() {
+        return productId;
+    }
+
+    /** */
+    public void setDate(Date date) {
+        this.date = date;
+    }
+
+    /** */
+    @QuerySqlField
+    public Date getDate() {
+        return date;
+    }
+
+    /** */
+    public void setAmount(int amount) {
+        this.amount = amount;
+    }
+
+    /** */
+    @QuerySqlField
+    public int getAmount() {
+        return amount;
+    }
+
+    /** */
+    public void setPrice(float price) {
+        this.price = price;
+    }
+
+    /** */
+    @QuerySqlField
+    public float getPrice() {
+        return price;
+    }
+
+    /** */
+    @QuerySqlField
+    public String getDayMillisecond() {
+        return FORMAT.format(date);
+    }
+}
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/pojos/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/package-info.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/pojos/package-info.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/package-info.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java
similarity index 78%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java
index b5ff5ad..ddfa111 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java
@@ -19,6 +19,7 @@
 
 import java.lang.reflect.Field;
 import org.apache.ignite.cache.store.CacheStore;
+import org.apache.ignite.cache.store.CacheStoreSession;
 import org.apache.ignite.cache.store.cassandra.CassandraCacheStore;
 import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
 import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
@@ -35,12 +36,24 @@
 
     /** */
     public static CacheStore createCacheStore(String cacheName, Resource persistenceSettings, DataSource conn) {
-        return createCacheStore(cacheName, persistenceSettings, conn, LOGGER);
+        return createCacheStore(cacheName, persistenceSettings, conn, null, LOGGER);
+    }
+
+    /** */
+    public static CacheStore createCacheStore(String cacheName, Resource persistenceSettings, DataSource conn,
+        CacheStoreSession session) {
+        return createCacheStore(cacheName, persistenceSettings, conn, session, LOGGER);
     }
 
     /** */
     public static CacheStore createCacheStore(String cacheName, Resource persistenceSettings, DataSource conn,
         Logger log) {
+        return createCacheStore(cacheName, persistenceSettings, conn, null, log);
+    }
+
+    /** */
+    public static CacheStore createCacheStore(String cacheName, Resource persistenceSettings, DataSource conn,
+        CacheStoreSession session, Logger log) {
         CassandraCacheStore<Integer, Integer> cacheStore =
             new CassandraCacheStore<>(conn, new KeyValuePersistenceSettings(persistenceSettings),
                 Runtime.getRuntime().availableProcessors());
@@ -52,7 +65,7 @@
             sesField.setAccessible(true);
             logField.setAccessible(true);
 
-            sesField.set(cacheStore, new TestCacheSession(cacheName));
+            sesField.set(cacheStore, session != null ? session : new TestCacheSession(cacheName));
             logField.set(cacheStore, new Log4JLogger(log));
         }
         catch (Throwable e) {
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java
similarity index 87%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java
index 66df6e7..e7047f3 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java
@@ -20,10 +20,12 @@
 import org.apache.ignite.cache.store.cassandra.datasource.Credentials;
 
 /**
- * Implementation of {@link org.apache.ignite.cache.store.cassandra.datasource.Credentials}
- * providing admin user/password to establish Cassandra session.
+ * Implementation of {@link Credentials} providing admin user/password to establish Cassandra session.
  */
 public class CassandraAdminCredentials implements Credentials {
+    /** */
+    private static final long serialVersionUID = 0L;
+
     /** {@inheritDoc} */
     @Override public String getUser() {
         return CassandraHelper.getAdminUser();
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraHelper.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraHelper.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraHelper.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraHelper.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraLifeCycleBean.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraLifeCycleBean.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraLifeCycleBean.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraLifeCycleBean.java
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java
similarity index 87%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java
index 52937ea..7546c9b 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java
@@ -20,10 +20,12 @@
 import org.apache.ignite.cache.store.cassandra.datasource.Credentials;
 
 /**
- * Implementation of {@link org.apache.ignite.cache.store.cassandra.datasource.Credentials}
- * providing regular user/password to establish Cassandra session.
+ * Implementation of {@link Credentials} providing regular user/password to establish Cassandra session.
  */
 public class CassandraRegularCredentials implements Credentials {
+    /** */
+    private static final long serialVersionUID = 0L;
+
     /** {@inheritDoc} */
     @Override public String getUser() {
         return CassandraHelper.getRegularUser();
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java
similarity index 90%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java
index 1cedb7a..3cb47e9 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java
@@ -34,7 +34,7 @@
     private Transaction tx;
 
     /** */
-    private Map<Object, Object> props;
+    private Map<Object, Object> props = U.newHashMap(1);
 
     /** */
     private Object attach;
@@ -45,6 +45,13 @@
     }
 
     /** */
+    public TestCacheSession(String cacheName, Transaction tx, Map<Object, Object> props) {
+        this.cacheName = cacheName;
+        this.tx = tx;
+        this.props = props;
+    }
+
+    /** */
     @SuppressWarnings("UnusedDeclaration")
     public void newSession(@Nullable Transaction tx) {
         this.tx = tx;
@@ -78,9 +85,6 @@
     /** {@inheritDoc} */
     @SuppressWarnings("unchecked")
     @Override public <K, V> Map<K, V> properties() {
-        if (props == null)
-            props = U.newHashMap(1);
-
         return (Map<K, V>)props;
     }
 
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java
new file mode 100644
index 0000000..5f3ec69
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests.utils;
+
+import org.apache.ignite.lang.IgniteAsyncSupport;
+import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+import org.apache.ignite.transactions.TransactionState;
+import org.jetbrains.annotations.Nullable;
+
+import java.util.UUID;
+
+/**
+ * Dummy transaction for test purposes.
+ */
+public class TestTransaction implements Transaction {
+    /** */
+    private final IgniteUuid xid = IgniteUuid.randomUuid();
+
+    /** {@inheritDoc} */
+    @Nullable @Override public IgniteUuid xid() {
+        return xid;
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public UUID nodeId() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long threadId() {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long startTime() {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public TransactionIsolation isolation() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public TransactionConcurrency concurrency() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean implicit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isInvalidate() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public TransactionState state() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long timeout() {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long timeout(long timeout) {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean setRollbackOnly() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isRollbackOnly() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void commit() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteAsyncSupport withAsync() {
+        throw new UnsupportedOperationException();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isAsync() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public <R> IgniteFuture<R> future() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void rollback() {
+        // No-op.
+    }
+}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java
new file mode 100644
index 0000000..24d64c9
--- /dev/null
+++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java
@@ -0,0 +1,660 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tests.utils;
+
+
+import org.apache.ignite.cache.store.cassandra.common.SystemHelper;
+import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
+import org.apache.ignite.tests.load.Generator;
+import org.apache.ignite.tests.pojos.Person;
+import org.apache.ignite.tests.pojos.PersonId;
+import org.apache.ignite.tests.pojos.Product;
+import org.apache.ignite.tests.pojos.ProductOrder;
+import org.springframework.core.io.ClassPathResource;
+
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.ResourceBundle;
+import java.util.Calendar;
+import java.util.Date;
+
+/**
+ * Helper class for all tests: reads test settings from the "tests" resource bundle
+ * and generates random keys, values and cache entries (longs, strings, persons,
+ * products and orders) used by the unit and load tests of the Cassandra store module.
+ */
+public class TestsHelper {
+    /** Alphabet used by {@link #randomString(int)}. */
+    private static final String LETTERS_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+    /** Alphabet used by {@link #randomNumber(int)}. */
+    private static final String NUMBERS_ALPHABET = "0123456789";
+
+    /** Shared random source for all generator methods, seeded with current time. */
+    private static final Random RANDOM = new Random(System.currentTimeMillis());
+
+    /** Test settings loaded from the "tests" resource bundle (tests.properties on the classpath). */
+    private static final ResourceBundle TESTS_SETTINGS = ResourceBundle.getBundle("tests");
+
+    /** Number of elements produced by the bulk generate* methods when no explicit count is given. */
+    private static final int BULK_OPERATION_SIZE = parseTestSettings("bulk.operation.size");
+
+    /** Name of the Ignite cache used by load tests. */
+    private static final String LOAD_TESTS_CACHE_NAME = TESTS_SETTINGS.getString("load.tests.cache.name");
+
+    /** Number of worker threads for load tests. */
+    private static final int LOAD_TESTS_THREADS_COUNT = parseTestSettings("load.tests.threads.count");
+
+    /** Warmup period for load tests (units defined by the settings file). */
+    private static final int LOAD_TESTS_WARMUP_PERIOD = parseTestSettings("load.tests.warmup.period");
+
+    /** Execution time for load tests (units defined by the settings file). */
+    private static final int LOAD_TESTS_EXECUTION_TIME = parseTestSettings("load.tests.execution.time");
+
+    /** Latency between load test requests (units defined by the settings file). */
+    private static final int LOAD_TESTS_REQUESTS_LATENCY = parseTestSettings("load.tests.requests.latency");
+
+    /** Number of products per generated transaction (read from settings; not referenced in this class — presumably used by callers via a getter elsewhere or reserved; TODO confirm). */
+    private static final int TRANSACTION_PRODUCTS_COUNT = parseTestSettings("transaction.products.count");
+
+    /** Default number of orders generated per product. */
+    private static final int TRANSACTION_ORDERS_COUNT = parseTestSettings("transaction.orders.count");
+
+    /** Year assigned to generated orders ("orders.year" setting, or current year if blank). */
+    private static final int ORDERS_YEAR;
+
+    /** Month assigned to generated orders ("orders.month" setting, or current month if blank). */
+    private static final int ORDERS_MONTH;
+
+    /** Day of month assigned to generated orders ("orders.day" setting, or current day if blank). */
+    private static final int ORDERS_DAY;
+
+    /** Classpath location of the persistence settings XML used by load tests. */
+    private static final String LOAD_TESTS_PERSISTENCE_SETTINGS = TESTS_SETTINGS.getString("load.tests.persistence.settings");
+
+    /** Location of the Ignite configuration used by load tests. */
+    private static final String LOAD_TESTS_IGNITE_CONFIG = TESTS_SETTINGS.getString("load.tests.ignite.config");
+
+    /** Key generator instantiated reflectively from the "load.tests.key.generator" setting. */
+    private static final Generator LOAD_TESTS_KEY_GENERATOR;
+
+    /** Value generator instantiated reflectively from the "load.tests.value.generator" setting. */
+    private static final Generator LOAD_TESTS_VALUE_GENERATOR;
+
+    /** Last one or two digits of the host's IP address, used to salt generated order ids. */
+    private static final String HOST_PREFIX;
+
+    static {
+        try {
+            // Generator classes must have a public no-arg constructor.
+            LOAD_TESTS_KEY_GENERATOR = (Generator)Class.forName(TESTS_SETTINGS.getString("load.tests.key.generator")).newInstance();
+            LOAD_TESTS_VALUE_GENERATOR = (Generator)Class.forName(TESTS_SETTINGS.getString("load.tests.value.generator")).newInstance();
+
+            // Derive a short per-host prefix from the last octet of the host IP (assumes IPv4 dotted form — TODO confirm for IPv6 hosts).
+            String[] parts = SystemHelper.HOST_IP.split("\\.");
+
+            String prefix = parts[3];
+            prefix = prefix.length() > 2 ? prefix.substring(prefix.length() - 2) : prefix;
+
+            HOST_PREFIX = prefix;
+
+            Calendar cl = Calendar.getInstance();
+
+            // Blank settings fall back to the current date.
+            String year = TESTS_SETTINGS.getString("orders.year");
+            ORDERS_YEAR = !year.trim().isEmpty() ? Integer.parseInt(year) : cl.get(Calendar.YEAR);
+
+            String month = TESTS_SETTINGS.getString("orders.month");
+            ORDERS_MONTH = !month.trim().isEmpty() ? Integer.parseInt(month) : cl.get(Calendar.MONTH);
+
+            String day = TESTS_SETTINGS.getString("orders.day");
+            ORDERS_DAY = !day.trim().isEmpty() ? Integer.parseInt(day) : cl.get(Calendar.DAY_OF_MONTH);
+        }
+        catch (Throwable e) {
+            throw new RuntimeException("Failed to initialize TestsHelper", e);
+        }
+    }
+
+    /** Parses the named test setting as an int. */
+    private static int parseTestSettings(String name) {
+        return Integer.parseInt(TESTS_SETTINGS.getString(name));
+    }
+
+    /** @return number of worker threads for load tests. */
+    public static int getLoadTestsThreadsCount() {
+        return LOAD_TESTS_THREADS_COUNT;
+    }
+
+    /** @return warmup period for load tests. */
+    public static int getLoadTestsWarmupPeriod() {
+        return LOAD_TESTS_WARMUP_PERIOD;
+    }
+
+    /** @return execution time for load tests. */
+    public static int getLoadTestsExecutionTime() {
+        return LOAD_TESTS_EXECUTION_TIME;
+    }
+
+    /** @return latency between load test requests. */
+    public static int getLoadTestsRequestsLatency() {
+        return LOAD_TESTS_REQUESTS_LATENCY;
+    }
+
+    /** @return classpath resource holding the persistence settings for load tests. */
+    public static ClassPathResource getLoadTestsPersistenceSettings() {
+        return new ClassPathResource(LOAD_TESTS_PERSISTENCE_SETTINGS);
+    }
+
+    /** @return location of the Ignite configuration used by load tests. */
+    public static String getLoadTestsIgniteConfig() {
+        return LOAD_TESTS_IGNITE_CONFIG;
+    }
+
+    /** @return default element count for bulk generate* methods. */
+    public static int getBulkOperationSize() {
+        return BULK_OPERATION_SIZE;
+    }
+
+    /** @return name of the Ignite cache used by load tests. */
+    public static String getLoadTestsCacheName() {
+        return LOAD_TESTS_CACHE_NAME;
+    }
+
+    /** Generates a key for load tests using the configured key generator. */
+    public static Object generateLoadTestsKey(long i) {
+        return LOAD_TESTS_KEY_GENERATOR.generate(i);
+    }
+
+    /** Generates a value for load tests using the configured value generator. */
+    public static Object generateLoadTestsValue(long i) {
+        return LOAD_TESTS_VALUE_GENERATOR.generate(i);
+    }
+
+    /** Generates a cache entry for load tests from the configured key/value generators. */
+    @SuppressWarnings("unchecked")
+    public static CacheEntryImpl generateLoadTestsEntry(long i) {
+        return new CacheEntryImpl(TestsHelper.generateLoadTestsKey(i), TestsHelper.generateLoadTestsValue(i));
+    }
+
+    /** Extracts the keys from a collection of cache entries, preserving iteration order. */
+    public static <K, V> Collection<K> getKeys(Collection<CacheEntryImpl<K, V>> entries) {
+        List<K> list = new LinkedList<>();
+
+        for (CacheEntryImpl<K, ?> entry : entries)
+            list.add(entry.getKey());
+
+        return list;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} long->long pairs. */
+    public static Map<Long, Long> generateLongsMap() {
+        return generateLongsMap(BULK_OPERATION_SIZE);
+    }
+
+    /** Generates {@code cnt} long->long pairs mapping each key i to i + 123. */
+    public static Map<Long, Long> generateLongsMap(int cnt) {
+        Map<Long, Long> map = new HashMap<>();
+
+        for (long i = 0; i < cnt; i++)
+            map.put(i, i + 123);
+
+        return map;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} long->long cache entries. */
+    public static Collection<CacheEntryImpl<Long, Long>> generateLongsEntries() {
+        return generateLongsEntries(BULK_OPERATION_SIZE);
+    }
+
+    /** Generates {@code cnt} long->long cache entries mapping each key i to i + 123. */
+    public static Collection<CacheEntryImpl<Long, Long>> generateLongsEntries(int cnt) {
+        Collection<CacheEntryImpl<Long, Long>> entries = new LinkedList<>();
+
+        for (long i = 0; i < cnt; i++)
+            entries.add(new CacheEntryImpl<>(i, i + 123));
+
+        return entries;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} string->string pairs. */
+    public static Map<String, String> generateStringsMap() {
+        return generateStringsMap(BULK_OPERATION_SIZE);
+    }
+
+    /** Generates {@code cnt} pairs keyed by the decimal index with random 5-letter values. */
+    public static Map<String, String> generateStringsMap(int cnt) {
+        Map<String, String> map = new HashMap<>();
+
+        for (int i = 0; i < cnt; i++)
+            map.put(Integer.toString(i), randomString(5));
+
+        return map;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} string->string cache entries. */
+    public static Collection<CacheEntryImpl<String, String>> generateStringsEntries() {
+        return generateStringsEntries(BULK_OPERATION_SIZE);
+    }
+
+    /** Generates {@code cnt} cache entries keyed by the decimal index with random 5-letter values. */
+    public static Collection<CacheEntryImpl<String, String>> generateStringsEntries(int cnt) {
+        Collection<CacheEntryImpl<String, String>> entries = new LinkedList<>();
+
+        for (int i = 0; i < cnt; i++)
+            entries.add(new CacheEntryImpl<>(Integer.toString(i), randomString(5)));
+
+        return entries;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} long->Person pairs with random person data. */
+    public static Map<Long, Person> generateLongsPersonsMap() {
+        Map<Long, Person> map = new HashMap<>();
+
+        for (long i = 0; i < BULK_OPERATION_SIZE; i++)
+            map.put(i, generateRandomPerson(i));
+
+        return map;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} long->Person cache entries with random person data. */
+    public static Collection<CacheEntryImpl<Long, Person>> generateLongsPersonsEntries() {
+        Collection<CacheEntryImpl<Long, Person>> entries = new LinkedList<>();
+
+        for (long i = 0; i < BULK_OPERATION_SIZE; i++)
+            entries.add(new CacheEntryImpl<>(i, generateRandomPerson(i)));
+
+        return entries;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} PersonId->Person pairs. */
+    public static Map<PersonId, Person> generatePersonIdsPersonsMap() {
+        return generatePersonIdsPersonsMap(BULK_OPERATION_SIZE);
+    }
+
+    /** Generates {@code cnt} PersonId->Person pairs; each person's number matches its random id. */
+    public static Map<PersonId, Person> generatePersonIdsPersonsMap(int cnt) {
+        Map<PersonId, Person> map = new HashMap<>();
+
+        for (int i = 0; i < cnt; i++) {
+            PersonId id = generateRandomPersonId();
+
+            map.put(id, generateRandomPerson(id.getPersonNumber()));
+        }
+
+        return map;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} PersonId->Person cache entries. */
+    public static Collection<CacheEntryImpl<PersonId, Person>> generatePersonIdsPersonsEntries() {
+        return generatePersonIdsPersonsEntries(BULK_OPERATION_SIZE);
+    }
+
+    /** Generates {@code cnt} PersonId->Person cache entries; each person's number matches its random id. */
+    public static Collection<CacheEntryImpl<PersonId, Person>> generatePersonIdsPersonsEntries(int cnt) {
+        Collection<CacheEntryImpl<PersonId, Person>> entries = new LinkedList<>();
+
+        for (int i = 0; i < cnt; i++) {
+            PersonId id = generateRandomPersonId();
+
+            entries.add(new CacheEntryImpl<>(id, generateRandomPerson(id.getPersonNumber())));
+        }
+
+        return entries;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} long->Product cache entries with random product data. */
+    public static List<CacheEntryImpl<Long, Product>> generateProductEntries() {
+        List<CacheEntryImpl<Long, Product>> entries = new LinkedList<>();
+
+        for (long i = 0; i < BULK_OPERATION_SIZE; i++)
+            entries.add(new CacheEntryImpl<>(i, generateRandomProduct(i)));
+
+        return entries;
+    }
+
+    /** Extracts product ids (entry keys) from a collection of product cache entries. */
+    public static Collection<Long> getProductIds(Collection<CacheEntryImpl<Long, Product>> entries) {
+        List<Long> ids = new LinkedList<>();
+
+        for (CacheEntryImpl<Long, Product> entry : entries)
+            ids.add(entry.getKey());
+
+        return ids;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} long->Product pairs. */
+    public static Map<Long, Product> generateProductsMap() {
+        return generateProductsMap(BULK_OPERATION_SIZE);
+    }
+
+    /** Generates {@code count} long->Product pairs with random product data. */
+    public static Map<Long, Product> generateProductsMap(int count) {
+        Map<Long, Product> map = new HashMap<>();
+
+        for (long i = 0; i < count; i++)
+            map.put(i, generateRandomProduct(i));
+
+        return map;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} order cache entries keyed by each order's generated id. */
+    public static Collection<CacheEntryImpl<Long, ProductOrder>> generateOrderEntries() {
+        Collection<CacheEntryImpl<Long, ProductOrder>> entries = new LinkedList<>();
+
+        for (long i = 0; i < BULK_OPERATION_SIZE; i++) {
+            ProductOrder order = generateRandomOrder(i);
+            entries.add(new CacheEntryImpl<>(order.getId(), order));
+        }
+
+        return entries;
+    }
+
+    /** Generates {@link #BULK_OPERATION_SIZE} orders keyed by the order id. */
+    public static Map<Long, ProductOrder> generateOrdersMap() {
+        return generateOrdersMap(BULK_OPERATION_SIZE);
+    }
+
+    /** Generates {@code count} orders keyed by the order id. */
+    public static Map<Long, ProductOrder> generateOrdersMap(int count) {
+        Map<Long, ProductOrder> map = new HashMap<>();
+
+        for (long i = 0; i < count; i++) {
+            ProductOrder order = generateRandomOrder(i);
+            map.put(order.getId(), order);
+        }
+
+        return map;
+    }
+
+    /** Generates {@link #TRANSACTION_ORDERS_COUNT} order entries per product, keyed by product id. */
+    public static Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> generateOrdersPerProductEntries(
+            Collection<CacheEntryImpl<Long, Product>> products) {
+        return generateOrdersPerProductEntries(products, TRANSACTION_ORDERS_COUNT);
+    }
+
+    /** Generates {@code ordersPerProductCount} order entries per product, keyed by product id. */
+    public static Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> generateOrdersPerProductEntries(
+            Collection<CacheEntryImpl<Long, Product>> products, int ordersPerProductCount) {
+        Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> map = new HashMap<>();
+
+        for (CacheEntryImpl<Long, Product> entry : products) {
+            List<CacheEntryImpl<Long, ProductOrder>> orders = new LinkedList<>();
+
+            for (long i = 0; i < ordersPerProductCount; i++) {
+                ProductOrder order = generateRandomOrder(entry.getKey());
+                orders.add(new CacheEntryImpl<>(order.getId(), order));
+            }
+
+            map.put(entry.getKey(), orders);
+        }
+
+        return map;
+    }
+
+    /** Generates {@link #TRANSACTION_ORDERS_COUNT} orders per product, keyed by product id then order id. */
+    public static Map<Long, Map<Long, ProductOrder>> generateOrdersPerProductMap(Map<Long, Product> products) {
+        return generateOrdersPerProductMap(products, TRANSACTION_ORDERS_COUNT);
+    }
+
+    /** Generates {@code ordersPerProductCount} orders per product, keyed by product id then order id. */
+    public static Map<Long, Map<Long, ProductOrder>> generateOrdersPerProductMap(Map<Long, Product> products,
+                                                                                 int ordersPerProductCount) {
+        Map<Long, Map<Long, ProductOrder>> map = new HashMap<>();
+
+        for (Map.Entry<Long, Product> entry : products.entrySet()) {
+            Map<Long, ProductOrder> orders = new HashMap<>();
+
+            for (long i = 0; i < ordersPerProductCount; i++) {
+                ProductOrder order = generateRandomOrder(entry.getKey());
+                orders.put(order.getId(), order);
+            }
+
+            map.put(entry.getKey(), orders);
+        }
+
+        return map;
+    }
+
+    /** Collects the distinct order ids from an orders-per-product map. */
+    public static Collection<Long> getOrderIds(Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> orders) {
+        Set<Long> ids = new HashSet<>();
+
+        for (Long key : orders.keySet()) {
+            for (CacheEntryImpl<Long, ProductOrder> entry : orders.get(key))
+                ids.add(entry.getKey());
+        }
+
+        return ids;
+    }
+
+    /** Generates a random Person with the given person number and 0-3 random phone numbers. */
+    public static Person generateRandomPerson(long personNum) {
+        int phonesCnt = RANDOM.nextInt(4);
+
+        List<String> phones = new LinkedList<>();
+
+        for (int i = 0; i < phonesCnt; i++)
+            phones.add(randomNumber(4));
+
+        return new Person(personNum, randomString(4), randomString(4), RANDOM.nextInt(100),
+            RANDOM.nextBoolean(), RANDOM.nextLong(), RANDOM.nextFloat(), new Date(), phones);
+    }
+
+    /** Generates a random PersonId with random 4-letter names and age below 100. */
+    public static PersonId generateRandomPersonId() {
+        return new PersonId(randomString(4), randomString(4), RANDOM.nextInt(100));
+    }
+
+    /** Generates a random Product with the given id and a deterministic pseudo-random price. */
+    public static Product generateRandomProduct(long id) {
+        return new Product(id, randomString(2), randomString(6), randomString(20), generateProductPrice(id));
+    }
+
+    /** Generates a random order for the given product with a random salt. */
+    public static ProductOrder generateRandomOrder(long productId) {
+        return generateRandomOrder(productId, RANDOM.nextInt(10000));
+    }
+
+    /** Builds an order dated per the ORDERS_* settings with an id derived from product id, time, host prefix and salt. */
+    private static ProductOrder generateRandomOrder(long productId, int saltedNumber) {
+        Calendar cl = Calendar.getInstance();
+        cl.set(Calendar.YEAR, ORDERS_YEAR);
+        cl.set(Calendar.MONTH, ORDERS_MONTH);
+        cl.set(Calendar.DAY_OF_MONTH, ORDERS_DAY);
+
+        // NOTE(review): productId + System.currentTimeMillis() is numeric (long) addition; string
+        // concatenation only starts at HOST_PREFIX. The resulting digit string (~13 time digits +
+        // up to 2 prefix digits + up to 4 salt digits) can exceed Long.MAX_VALUE and make
+        // parseLong throw NumberFormatException — confirm the intended id format.
+        long id = Long.parseLong(productId + System.currentTimeMillis() + HOST_PREFIX + saltedNumber);
+
+        return generateRandomOrder(id, productId, cl.getTime());
+    }
+
+    /** Generates an order with the given id, product id and date, price derived from the product id, and amount 1-20. */
+    public static ProductOrder generateRandomOrder(long id, long productId, Date date) {
+        return new ProductOrder(id, productId, generateProductPrice(productId), date, 1 + RANDOM.nextInt(20));
+    }
+
+    /** Compares two maps by key set and per-key equals; {@code false} if either is null, sizes differ, or any value is null. */
+    public static boolean checkMapsEqual(Map map1, Map map2) {
+        if (map1 == null || map2 == null || map1.size() != map2.size())
+            return false;
+
+        for (Object key : map1.keySet()) {
+            Object obj1 = map1.get(key);
+            Object obj2 = map2.get(key);
+
+            if (obj1 == null || obj2 == null || !obj1.equals(obj2))
+                return false;
+        }
+
+        return true;
+    }
+
+    /** Checks that the entry collection matches the map exactly (same size, each entry value equals the mapped value). */
+    public static <K, V> boolean checkCollectionsEqual(Map<K, V> map, Collection<CacheEntryImpl<K, V>> col) {
+        if (map == null || col == null || map.size() != col.size())
+            return false;
+
+        for (CacheEntryImpl<K, V> entry : col) {
+            if (!entry.getValue().equals(map.get(entry.getKey())))
+                return false;
+        }
+
+        return true;
+    }
+
+    /** Compares two person maps; with {@code primitiveFieldsOnly} only primitive fields are compared. */
+    public static <K> boolean checkPersonMapsEqual(Map<K, Person> map1, Map<K, Person> map2,
+        boolean primitiveFieldsOnly) {
+        if (map1 == null || map2 == null || map1.size() != map2.size())
+            return false;
+
+        for (K key : map1.keySet()) {
+            Person person1 = map1.get(key);
+            Person person2 = map2.get(key);
+
+            boolean equals = person1 != null && person2 != null &&
+                (primitiveFieldsOnly ? person1.equalsPrimitiveFields(person2) : person1.equals(person2));
+
+            if (!equals)
+                return false;
+        }
+
+        return true;
+    }
+
+    /** Checks person entries against a map; with {@code primitiveFieldsOnly} only primitive fields are compared. */
+    public static <K> boolean checkPersonCollectionsEqual(Map<K, Person> map, Collection<CacheEntryImpl<K, Person>> col,
+        boolean primitiveFieldsOnly) {
+        if (map == null || col == null || map.size() != col.size())
+            return false;
+
+        for (CacheEntryImpl<K, Person> entry : col) {
+            boolean equals = primitiveFieldsOnly ?
+                entry.getValue().equalsPrimitiveFields(map.get(entry.getKey())) :
+                entry.getValue().equals(map.get(entry.getKey()));
+
+            if (!equals)
+                return false;
+        }
+
+        return true;
+    }
+
+    /** Checks that product entries match the map exactly. */
+    public static <K> boolean checkProductCollectionsEqual(Map<K, Product> map, Collection<CacheEntryImpl<K, Product>> col) {
+        if (map == null || col == null || map.size() != col.size())
+            return false;
+
+        for (CacheEntryImpl<K, Product> entry : col)
+            if (!entry.getValue().equals(map.get(entry.getKey())))
+                return false;
+
+        return true;
+    }
+
+    /** Compares two product maps by key set and per-key equals. */
+    public static <K> boolean checkProductMapsEqual(Map<K, Product> map1, Map<K, Product> map2) {
+        if (map1 == null || map2 == null || map1.size() != map2.size())
+            return false;
+
+        for (K key : map1.keySet()) {
+            Product product1 = map1.get(key);
+            Product product2 = map2.get(key);
+
+            boolean equals = product1 != null && product2 != null && product1.equals(product2);
+
+            if (!equals)
+                return false;
+        }
+
+        return true;
+    }
+
+    /** Checks that order entries match the map exactly. */
+    public static <K> boolean checkOrderCollectionsEqual(Map<K, ProductOrder> map, Collection<CacheEntryImpl<K, ProductOrder>> col) {
+        if (map == null || col == null || map.size() != col.size())
+            return false;
+
+        for (CacheEntryImpl<K, ProductOrder> entry : col)
+            if (!entry.getValue().equals(map.get(entry.getKey())))
+                return false;
+
+        return true;
+    }
+
+    /** Compares two order maps by key set and per-key equals. */
+    public static <K> boolean checkOrderMapsEqual(Map<K, ProductOrder> map1, Map<K, ProductOrder> map2) {
+        if (map1 == null || map2 == null || map1.size() != map2.size())
+            return false;
+
+        for (K key : map1.keySet()) {
+            ProductOrder order1 = map1.get(key);
+            ProductOrder order2 = map2.get(key);
+
+            boolean equals = order1 != null && order2 != null && order1.equals(order2);
+
+            if (!equals)
+                return false;
+        }
+
+        return true;
+    }
+
+    /** Generates a random uppercase string of the given length. */
+    public static String randomString(int len) {
+        StringBuilder builder = new StringBuilder(len);
+
+        for (int i = 0; i < len; i++)
+            builder.append(LETTERS_ALPHABET.charAt(RANDOM.nextInt(LETTERS_ALPHABET.length())));
+
+        return builder.toString();
+    }
+
+    /** Generates a random digit string of the given length (may have leading zeros). */
+    public static String randomNumber(int len) {
+        StringBuilder builder = new StringBuilder(len);
+
+        for (int i = 0; i < len; i++)
+            builder.append(NUMBERS_ALPHABET.charAt(RANDOM.nextInt(NUMBERS_ALPHABET.length())));
+
+        return builder.toString();
+    }
+
+    /**
+     * Derives a deterministic pseudo-price from a product id: builds a large number from the id,
+     * strips zero digits, then alternately halves and square-roots it until it drops below 100,
+     * finally truncating to two decimal places.
+     */
+    private static float generateProductPrice(long productId) {
+        long id = productId < 1000 ?
+                (((productId + 1) * (productId + 1) * 1000) / 2) * 10 :
+                (productId / 20) * (productId / 20);
+
+        // Guard against a zero seed (e.g. small ids whose square rounds to 0 after division).
+        id = id == 0 ? 24 : id;
+
+        float price = Long.parseLong(Long.toString(id).replace("0", ""));
+
+        int i = 0;
+
+        // Alternate sqrt (even iterations) and halving (odd iterations) until price < 100.
+        while (price > 100) {
+            if (i % 2 != 0)
+                price = price / 2;
+            else
+                price = (float) Math.sqrt(price);
+
+            i++;
+        }
+
+        // Truncate (not round) to two decimal places.
+        return ((float)((int)(price * 100))) / 100.0F;
+    }
+}
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/utils/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/package-info.java
similarity index 100%
rename from modules/cassandra/src/test/java/org/apache/ignite/tests/utils/package-info.java
rename to modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/package-info.java
diff --git a/modules/cassandra/src/test/resources/log4j.properties b/modules/cassandra/store/src/test/resources/log4j.properties
similarity index 100%
rename from modules/cassandra/src/test/resources/log4j.properties
rename to modules/cassandra/store/src/test/resources/log4j.properties
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml
similarity index 90%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml
index f7eb372..aec602e 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml
@@ -26,7 +26,11 @@
     <bean id="cassandraAdminCredentials" class="org.apache.ignite.tests.utils.CassandraAdminCredentials"/>
     <bean id="cassandraRegularCredentials" class="org.apache.ignite.tests.utils.CassandraRegularCredentials"/>
 
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
+        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
+            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+        </constructor-arg>
+    </bean>
 
     <bean id="contactPoints" class="org.apache.ignite.tests.utils.CassandraHelper" factory-method="getContactPointsArray"/>
 
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/connection.properties b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection.properties
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/connection.properties
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection.properties
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/credentials.properties b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/credentials.properties
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/credentials.properties
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/credentials.properties
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/embedded-cassandra.yaml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/embedded-cassandra.yaml
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/embedded-cassandra.yaml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/embedded-cassandra.yaml
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/keyspaces.properties b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/keyspaces.properties
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/cassandra/keyspaces.properties
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/keyspaces.properties
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml
similarity index 94%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml
index 9aa5c84..fbf38e9 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml
@@ -40,12 +40,6 @@
     <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
         <property name="cacheConfiguration">
             <list>
-                <!-- Partitioned cache example configuration (Atomic mode). -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="atomicityMode" value="ATOMIC"/>
-                    <property name="backups" value="1"/>
-                </bean>
-
                 <!-- Configuring persistence for "cache1" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache1"/>
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml
similarity index 96%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml
index 905c3e5..e872201 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml
@@ -24,6 +24,6 @@
     <!-- Kryo serialization specified to be used -->
     <valuePersistence class="org.apache.ignite.tests.pojos.Person"
                       strategy="BLOB"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer"
+                      serializer="org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer"
                       column="value"/>
 </persistence>
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml
similarity index 65%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml
index 8dcfffd..c9b45c8 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml
@@ -41,16 +41,25 @@
         <constructor-arg type="org.springframework.core.io.Resource" value="classpath:org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml" />
     </bean>
 
+    <!-- Persistence settings for 'cache4' -->
+    <bean id="cache4_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
+        <constructor-arg type="org.springframework.core.io.Resource" value="classpath:org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml" />
+    </bean>
+
+    <!-- Persistence settings for 'product' -->
+    <bean id="product_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
+        <constructor-arg type="org.springframework.core.io.Resource" value="classpath:org/apache/ignite/tests/persistence/pojo/product.xml" />
+    </bean>
+
+    <!-- Persistence settings for 'order' -->
+    <bean id="order_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
+        <constructor-arg type="org.springframework.core.io.Resource" value="classpath:org/apache/ignite/tests/persistence/pojo/order.xml" />
+    </bean>
+
     <!-- Ignite configuration -->
     <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
         <property name="cacheConfiguration">
             <list>
-                <!-- Partitioned cache example configuration (Atomic mode). -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="atomicityMode" value="ATOMIC"/>
-                    <property name="backups" value="1"/>
-                </bean>
-
                 <!-- Configuring persistence for "cache1" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache1"/>
@@ -90,6 +99,47 @@
                     </property>
                 </bean>
 
+                <!-- Configuring persistence for "cache4" cache -->
+                <bean class="org.apache.ignite.configuration.CacheConfiguration">
+                    <property name="name" value="cache4"/>
+                    <property name="readThrough" value="true"/>
+                    <property name="writeThrough" value="true"/>
+                    <property name="cacheStoreFactory">
+                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
+                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
+                            <property name="persistenceSettingsBean" value="cache4_persistence_settings"/>
+                        </bean>
+                    </property>
+                </bean>
+
+                <!-- Configuring persistence for "product" cache -->
+                <bean class="org.apache.ignite.configuration.CacheConfiguration">
+                    <property name="name" value="product"/>
+                    <property name="readThrough" value="true"/>
+                    <property name="writeThrough" value="true"/>
+                    <property name="atomicityMode" value="TRANSACTIONAL"/>
+                    <property name="cacheStoreFactory">
+                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
+                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
+                            <property name="persistenceSettingsBean" value="product_persistence_settings"/>
+                        </bean>
+                    </property>
+                </bean>
+
+                <!-- Configuring persistence for "order" cache -->
+                <bean class="org.apache.ignite.configuration.CacheConfiguration">
+                    <property name="name" value="order"/>
+                    <property name="readThrough" value="true"/>
+                    <property name="writeThrough" value="true"/>
+                    <property name="atomicityMode" value="TRANSACTIONAL"/>
+                    <property name="cacheStoreFactory">
+                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
+                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
+                            <property name="persistenceSettingsBean" value="order_persistence_settings"/>
+                        </bean>
+                    </property>
+                </bean>
+
             </list>
         </property>
 
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/order.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/order.xml
new file mode 100644
index 0000000..d616364
--- /dev/null
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/order.xml
@@ -0,0 +1,21 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<persistence keyspace="test1" table="order">
+    <keyPersistence class="java.lang.Long" column="id" strategy="PRIMITIVE" />
+    <valuePersistence class="org.apache.ignite.tests.pojos.ProductOrder" strategy="POJO" />
+</persistence>
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
similarity index 98%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
index dd8eac5..f602508 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
@@ -146,7 +146,7 @@
     -->
     <valuePersistence class="org.apache.ignite.tests.pojos.Person"
                       strategy="POJO"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer">
+                      serializer="org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer">
         <!--
          Mapping from POJO field to Cassandra table column.
 
@@ -161,8 +161,10 @@
            5) indexClass   [optional] - custom index java class name, in case you want to use custom index
            6) indexOptions [optional] - custom index options
         -->
+        <field name="personNumber" column="number" />
         <field name="firstName" column="first_name" />
         <field name="lastName" column="last_name" />
+        <field name="fullName" />
         <field name="age" />
         <field name="married" index="true"/>
         <field name="height" />
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml
similarity index 97%
copy from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
copy to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml
index dd8eac5..490d8e7 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml
@@ -25,7 +25,7 @@
   2) table    [required] - Cassandra tables which should be used to store key/value pairs
   3) ttl      [optional] - expiration period for the table rows (in seconds)
 -->
-<persistence keyspace="test1" table="pojo_test3" ttl="86400">
+<persistence keyspace="test1" ttl="86400">
     <!--
     Cassandra keyspace options which should be used to create provided keyspace if it doesn't exist.
 
@@ -146,7 +146,7 @@
     -->
     <valuePersistence class="org.apache.ignite.tests.pojos.Person"
                       strategy="POJO"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer">
+                      serializer="org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer">
         <!--
          Mapping from POJO field to Cassandra table column.
 
@@ -161,8 +161,10 @@
            5) indexClass   [optional] - custom index java class name, in case you want to use custom index
            6) indexOptions [optional] - custom index options
         -->
+        <field name="personNumber" column="number" />
         <field name="firstName" column="first_name" />
         <field name="lastName" column="last_name" />
+        <field name="fullName" />
         <field name="age" />
         <field name="married" index="true"/>
         <field name="height" />
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/product.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/product.xml
new file mode 100644
index 0000000..c761e1c
--- /dev/null
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/product.xml
@@ -0,0 +1,21 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<persistence keyspace="test1" table="product">
+    <keyPersistence class="java.lang.Long" column="id" strategy="PRIMITIVE" />
+    <valuePersistence class="org.apache.ignite.tests.pojos.Product" strategy="POJO" />
+</persistence>
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml
similarity index 94%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml
index fb6b055..13e0922 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml
@@ -40,12 +40,6 @@
     <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
         <property name="cacheConfiguration">
             <list>
-                <!-- Partitioned cache example configuration (Atomic mode). -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="atomicityMode" value="ATOMIC"/>
-                    <property name="backups" value="1"/>
-                </bean>
-
                 <!-- Configuring persistence for "cache1" cache -->
                 <bean class="org.apache.ignite.configuration.CacheConfiguration">
                     <property name="name" value="cache1"/>
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
similarity index 94%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
index 50b2164..5b5bb59 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
@@ -24,7 +24,11 @@
         http://www.springframework.org/schema/util
         http://www.springframework.org/schema/util/spring-util.xsd">
 
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
+        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
+            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+        </constructor-arg>
+    </bean>
 
     <util:list id="contactPoints" value-type="java.lang.String">
         <value>cassandra-node-1.abc.com</value>
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml
similarity index 95%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml
index 1dc6f8a..8d71aec 100644
--- a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml
+++ b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml
@@ -25,7 +25,11 @@
         http://www.springframework.org/schema/util/spring-util.xsd">
 
     <!-- Cassandra connection settings -->
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
+        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
+            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
+        </constructor-arg>
+    </bean>
 
     <util:list id="contactPoints" value-type="java.lang.String">
         <value>cassandra-node-1.abc.com</value>
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml
diff --git a/modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml
similarity index 100%
rename from modules/cassandra/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml
rename to modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml
diff --git a/modules/cassandra/src/test/resources/tests.properties b/modules/cassandra/store/src/test/resources/tests.properties
similarity index 87%
rename from modules/cassandra/src/test/resources/tests.properties
rename to modules/cassandra/store/src/test/resources/tests.properties
index 2c91e57..b11f2c8 100644
--- a/modules/cassandra/src/test/resources/tests.properties
+++ b/modules/cassandra/store/src/test/resources/tests.properties
@@ -16,6 +16,21 @@
 # Number of elements for CacheStore bulk operations: loadAll, writeAll, deleteAll
 bulk.operation.size=100
 
+# Number of products per transaction
+transaction.products.count=2
+
+# Number of orders per transaction
+transaction.orders.count=10
+
+# Year to use for generating new orders
+orders.year=
+
+# Month to use for generating new orders
+orders.month=
+
+# Day of month to use for generating new orders
+orders.day=
+
 # ----- Load tests settings -----
 
 # Ignite cache to be used by load tests
diff --git a/modules/cassandra/src/test/scripts/cassandra-load-tests.bat b/modules/cassandra/store/src/test/scripts/cassandra-load-tests.bat
similarity index 100%
rename from modules/cassandra/src/test/scripts/cassandra-load-tests.bat
rename to modules/cassandra/store/src/test/scripts/cassandra-load-tests.bat
diff --git a/modules/cassandra/src/test/scripts/cassandra-load-tests.sh b/modules/cassandra/store/src/test/scripts/cassandra-load-tests.sh
similarity index 100%
rename from modules/cassandra/src/test/scripts/cassandra-load-tests.sh
rename to modules/cassandra/store/src/test/scripts/cassandra-load-tests.sh
diff --git a/modules/cassandra/src/test/scripts/ignite-load-tests.bat b/modules/cassandra/store/src/test/scripts/ignite-load-tests.bat
similarity index 100%
rename from modules/cassandra/src/test/scripts/ignite-load-tests.bat
rename to modules/cassandra/store/src/test/scripts/ignite-load-tests.bat
diff --git a/modules/cassandra/src/test/scripts/ignite-load-tests.sh b/modules/cassandra/store/src/test/scripts/ignite-load-tests.sh
similarity index 100%
rename from modules/cassandra/src/test/scripts/ignite-load-tests.sh
rename to modules/cassandra/store/src/test/scripts/ignite-load-tests.sh
diff --git a/modules/cassandra/src/test/scripts/jvm-opt.sh b/modules/cassandra/store/src/test/scripts/jvm-opt.sh
similarity index 100%
rename from modules/cassandra/src/test/scripts/jvm-opt.sh
rename to modules/cassandra/store/src/test/scripts/jvm-opt.sh
diff --git a/modules/cassandra/src/test/scripts/jvm-opts.bat b/modules/cassandra/store/src/test/scripts/jvm-opts.bat
similarity index 100%
rename from modules/cassandra/src/test/scripts/jvm-opts.bat
rename to modules/cassandra/store/src/test/scripts/jvm-opts.bat
diff --git a/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.bat b/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.bat
new file mode 100644
index 0000000..d538ea4
--- /dev/null
+++ b/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.bat
@@ -0,0 +1,41 @@
+::
+:: Licensed to the Apache Software Foundation (ASF) under one or more
+:: contributor license agreements.  See the NOTICE file distributed with
+:: this work for additional information regarding copyright ownership.
+:: The ASF licenses this file to You under the Apache License, Version 2.0
+:: (the "License"); you may not use this file except in compliance with
+:: the License.  You may obtain a copy of the License at
+::
+::      http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+::
+
+echo off
+
+echo.
+
+set TESTS_CLASSPATH="%~dp0\lib*;%~dp0settings"
+
+call %~dp0jvm-opts.bat %*
+
+call java %JVM_OPTS% -cp "%TESTS_CLASSPATH%" "org.apache.ignite.tests.LoadTestsCassandraArtifactsCreator"
+
+if %errorLevel% NEQ 0 (
+    echo.
+    echo --------------------------------------------------------------------------------
+    echo [ERROR] Failed to recreate Cassandra artifacts
+    echo --------------------------------------------------------------------------------
+    echo.
+    exit /b %errorLevel%
+)
+
+echo.
+echo --------------------------------------------------------------------------------
+echo [INFO] Cassandra artifacts were successfully recreated
+echo --------------------------------------------------------------------------------
+echo.
diff --git a/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.sh b/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.sh
new file mode 100644
index 0000000..b0f99be
--- /dev/null
+++ b/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+TESTS_ROOT=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))
+TESTS_CLASSPATH="$TESTS_ROOT/lib/*:$TESTS_ROOT/settings"
+
+. $TESTS_ROOT/jvm-opt.sh $@
+
+java $JVM_OPTS -cp "$TESTS_CLASSPATH" "org.apache.ignite.tests.LoadTestsCassandraArtifactsCreator"
+
+if [ $? -ne 0 ]; then
+    echo
+    echo "--------------------------------------------------------------------------------"
+    echo "[ERROR] Failed to recreate Cassandra artifacts"
+    echo "--------------------------------------------------------------------------------"
+    echo
+    exit 1
+fi
+
+echo
+echo "--------------------------------------------------------------------------------"
+echo "[INFO] Cassandra artifacts were successfully recreated"
+echo "--------------------------------------------------------------------------------"
+echo
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcResultSetSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcResultSetSelfTest.java
index cc67a65..131ed74 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcResultSetSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcResultSetSelfTest.java
@@ -495,8 +495,8 @@
 
         while (rs.next()) {
             if (cnt == 0) {
-                assert "http://abc.com/".equals(rs.getURL("urlVal").toString());
-                assert "http://abc.com/".equals(rs.getURL(15).toString());
+                assertTrue("http://abc.com/".equals(rs.getURL("urlVal").toString()));
+                assertTrue("http://abc.com/".equals(rs.getURL(15).toString()));
             }
 
             cnt++;
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcResultSetSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcResultSetSelfTest.java
index 013fd09..7b077fc 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcResultSetSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcResultSetSelfTest.java
@@ -429,8 +429,8 @@
 
         while (rs.next()) {
             if (cnt == 0) {
-                assert "http://abc.com/".equals(rs.getURL("urlVal").toString());
-                assert "http://abc.com/".equals(rs.getURL(15).toString());
+                assertTrue("http://abc.com/".equals(rs.getURL("urlVal").toString()));
+                assertTrue("http://abc.com/".equals(rs.getURL(15).toString()));
             }
 
             cnt++;
diff --git a/modules/core/src/main/java/org/apache/ignite/Ignite.java b/modules/core/src/main/java/org/apache/ignite/Ignite.java
index bd21468..0de08d5 100644
--- a/modules/core/src/main/java/org/apache/ignite/Ignite.java
+++ b/modules/core/src/main/java/org/apache/ignite/Ignite.java
@@ -20,6 +20,7 @@
 import java.util.Collection;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
+import javax.cache.CacheException;
 import org.apache.ignite.cache.CacheMode;
 import org.apache.ignite.cache.affinity.Affinity;
 import org.apache.ignite.cluster.ClusterGroup;
@@ -220,8 +221,24 @@
      *
      * @param cacheCfg Cache configuration to use.
      * @return Instance of started cache.
+     * @throws CacheException If a cache with the same name already exists or other error occurs.
      */
-    public <K, V> IgniteCache<K, V> createCache(CacheConfiguration<K, V> cacheCfg);
+    public <K, V> IgniteCache<K, V> createCache(CacheConfiguration<K, V> cacheCfg) throws CacheException;
+
+    /**
+     * Dynamically starts new caches with the given cache configurations.
+     * <p>
+     * If the local node is an affinity node, this method will return the instances of the started caches.
+     * Otherwise, it will create client caches on the local node.
+     * <p>
+     * If for one of the configurations a cache with the same name already exists in the grid, an exception will be thrown
+     * regardless of whether the given configuration matches the configuration of the existing cache or not.
+     *
+     * @param cacheCfgs Collection of cache configurations to use.
+     * @return Collection of instances of started caches.
+     * @throws CacheException If one of the caches to be created already exists or other error occurs.
+     */
+    public Collection<IgniteCache> createCaches(Collection<CacheConfiguration> cacheCfgs) throws CacheException;
 
     /**
      * Dynamically starts new cache using template configuration.
@@ -233,8 +250,9 @@
      *
      * @param cacheName Cache name.
      * @return Instance of started cache.
+     * @throws CacheException If a cache with the same name already exists or other error occurs.
      */
-    public <K, V> IgniteCache<K, V> createCache(String cacheName);
+    public <K, V> IgniteCache<K, V> createCache(String cacheName) throws CacheException;
 
     /**
      * Gets existing cache with the given name or creates new one with the given configuration.
@@ -245,23 +263,39 @@
      *
      * @param cacheCfg Cache configuration to use.
      * @return Existing or newly created cache.
+     * @throws CacheException If error occurs.
      */
-    public <K, V> IgniteCache<K, V> getOrCreateCache(CacheConfiguration<K, V> cacheCfg);
+    public <K, V> IgniteCache<K, V> getOrCreateCache(CacheConfiguration<K, V> cacheCfg) throws CacheException;
 
     /**
      * Gets existing cache with the given name or creates new one using template configuration.
      *
      * @param cacheName Cache name.
      * @return Existing or newly created cache.
+     * @throws CacheException If error occurs.
      */
-    public <K, V> IgniteCache<K, V> getOrCreateCache(String cacheName);
+    public <K, V> IgniteCache<K, V> getOrCreateCache(String cacheName) throws CacheException;
+
+    /**
+     * Gets existing caches with the given names or creates new ones with the given configurations.
+     * <p>
+     * If a cache with one of the given names already exists, this method will not check that its
+     * configuration matches the corresponding given configuration and will return an instance
+     * of the existing cache.
+     *
+     * @param cacheCfgs Collection of cache configurations to use.
+     * @return Collection of existing or newly created caches.
+     * @throws CacheException If error occurs.
+     */
+    public Collection<IgniteCache> getOrCreateCaches(Collection<CacheConfiguration> cacheCfgs) throws CacheException;
 
     /**
      * Adds cache configuration template.
      *
      * @param cacheCfg Cache configuration template.
+     * @throws CacheException If error occurs.
      */
-    public <K, V> void addCacheConfiguration(CacheConfiguration<K, V> cacheCfg);
+    public <K, V> void addCacheConfiguration(CacheConfiguration<K, V> cacheCfg) throws CacheException;
 
     /**
      * Dynamically starts new cache with the given cache configuration.
@@ -275,10 +309,11 @@
      * @param cacheCfg Cache configuration to use.
      * @param nearCfg Near cache configuration to use on local node in case it is not an
      *      affinity node.
+     * @throws CacheException If a cache with the same name already exists or other error occurs.
      * @return Instance of started cache.
      */
     public <K, V> IgniteCache<K, V> createCache(CacheConfiguration<K, V> cacheCfg,
-        NearCacheConfiguration<K, V> nearCfg);
+        NearCacheConfiguration<K, V> nearCfg) throws CacheException;
 
     /**
      * Gets existing cache with the given cache configuration or creates one if it does not exist.
@@ -293,9 +328,10 @@
      * @param cacheCfg Cache configuration.
      * @param nearCfg Near cache configuration for client.
      * @return {@code IgniteCache} instance.
+     * @throws CacheException If error occurs.
      */
     public <K, V> IgniteCache<K, V> getOrCreateCache(CacheConfiguration<K, V> cacheCfg,
-        NearCacheConfiguration<K, V> nearCfg);
+        NearCacheConfiguration<K, V> nearCfg) throws CacheException;
 
     /**
      * Starts a near cache on local node if cache was previously started with one of the
@@ -305,8 +341,10 @@
      * @param cacheName Cache name.
      * @param nearCfg Near cache configuration.
      * @return Cache instance.
+     * @throws CacheException If error occurs.
      */
-    public <K, V> IgniteCache<K, V> createNearCache(@Nullable String cacheName, NearCacheConfiguration<K, V> nearCfg);
+    public <K, V> IgniteCache<K, V> createNearCache(@Nullable String cacheName, NearCacheConfiguration<K, V> nearCfg)
+        throws CacheException;
 
     /**
      * Gets existing near cache with the given name or creates a new one.
@@ -314,15 +352,26 @@
      * @param cacheName Cache name.
      * @param nearCfg Near configuration.
      * @return {@code IgniteCache} instance.
+     * @throws CacheException If error occurs.
      */
-    public <K, V> IgniteCache<K, V> getOrCreateNearCache(@Nullable String cacheName, NearCacheConfiguration<K, V> nearCfg);
+    public <K, V> IgniteCache<K, V> getOrCreateNearCache(@Nullable String cacheName, NearCacheConfiguration<K, V> nearCfg)
+        throws CacheException;
 
     /**
      * Stops dynamically started cache.
      *
      * @param cacheName Cache name to stop.
+     * @throws CacheException If error occurs.
      */
-    public void destroyCache(String cacheName);
+    public void destroyCache(String cacheName) throws CacheException;
+
+    /**
+     * Stops dynamically started caches.
+     *
+     * @param cacheNames Collection of cache names to stop.
+     * @throws CacheException If error occurs.
+     */
+    public void destroyCaches(Collection<String> cacheNames) throws CacheException;
 
     /**
      * Gets an instance of {@link IgniteCache} API. {@code IgniteCache} is a fully-compatible
@@ -330,8 +379,9 @@
      *
      * @param name Cache name.
      * @return Instance of the cache for the specified name.
+     * @throws CacheException If error occurs.
      */
-    public <K, V> IgniteCache<K, V> cache(@Nullable String name);
+    public <K, V> IgniteCache<K, V> cache(@Nullable String name) throws CacheException;
 
     /**
      * Gets the collection of names of currently available caches.
@@ -357,8 +407,9 @@
      *
      * @param cacheName Cache name ({@code null} for default cache).
      * @return Data streamer.
+     * @throws IllegalStateException If node is stopping.
      */
-    public <K, V> IgniteDataStreamer<K, V> dataStreamer(@Nullable String cacheName);
+    public <K, V> IgniteDataStreamer<K, V> dataStreamer(@Nullable String cacheName) throws IllegalStateException;
 
     /**
      * Gets an instance of IGFS (Ignite In-Memory File System). If one is not
@@ -372,7 +423,7 @@
      * @return IGFS instance.
      * @throws IllegalArgumentException If IGFS with such name is not configured.
      */
-    public IgniteFileSystem fileSystem(String name);
+    public IgniteFileSystem fileSystem(String name) throws IllegalArgumentException;
 
     /**
      * Gets all instances of IGFS (Ignite In-Memory File System).
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteServices.java b/modules/core/src/main/java/org/apache/ignite/IgniteServices.java
index 08577c5..5430e4d 100644
--- a/modules/core/src/main/java/org/apache/ignite/IgniteServices.java
+++ b/modules/core/src/main/java/org/apache/ignite/IgniteServices.java
@@ -62,9 +62,9 @@
  * you can also automatically deploy services on startup by specifying them in {@link IgniteConfiguration}
  * like so:
  * <pre name="code" class="java">
- * IgniteConfiguration gridCfg = new IgniteConfiguration();
+ * IgniteConfiguration cfg = new IgniteConfiguration();
  *
- * GridServiceConfiguration svcCfg1 = new GridServiceConfiguration();
+ * ServiceConfiguration svcCfg1 = new ServiceConfiguration();
  *
  * // Cluster-wide singleton configuration.
  * svcCfg1.setName("myClusterSingletonService");
@@ -72,16 +72,16 @@
  * svcCfg1.setTotalCount(1);
  * svcCfg1.setService(new MyClusterSingletonService());
  *
- * GridServiceConfiguration svcCfg2 = new GridServiceConfiguration();
+ * ServiceConfiguration svcCfg2 = new ServiceConfiguration();
  *
  * // Per-node singleton configuration.
  * svcCfg2.setName("myNodeSingletonService");
  * svcCfg2.setMaxPerNodeCount(1);
  * svcCfg2.setService(new MyNodeSingletonService());
  *
- * gridCfg.setServiceConfiguration(svcCfg1, svcCfg2);
+ * cfg.setServiceConfiguration(svcCfg1, svcCfg2);
  * ...
- * Ignition.start(gridCfg);
+ * Ignition.start(cfg);
  * </pre>
  * <h1 class="header">Load Balancing</h1>
  * In all cases, other than singleton service deployment, Ignite will automatically make sure that
@@ -106,18 +106,18 @@
  * Here is an example of how an distributed service may be implemented and deployed:
  * <pre name="code" class="java">
  * // Simple service implementation.
- * public class MyGridService implements GridService {
+ * public class MyIgniteService implements Service {
  *      ...
  *      // Example of ignite resource injection. All resources are optional.
  *      // You should inject resources only as needed.
  *      &#64;IgniteInstanceResource
- *      private Grid grid;
+ *      private Ignite ignite;
  *      ...
- *      &#64;Override public void cancel(GridServiceContext ctx) {
+ *      &#64;Override public void cancel(ServiceContext ctx) {
  *          // No-op.
  *      }
  *
- *      &#64;Override public void execute(GridServiceContext ctx) {
+ *      &#64;Override public void execute(ServiceContext ctx) {
  *          // Loop until service is cancelled.
  *          while (!ctx.isCancelled()) {
  *              // Do something.
@@ -126,16 +126,16 @@
  *      }
  *  }
  * ...
- * GridServices svcs = grid.services();
+ * IgniteServices svcs = ignite.services();
  *
- * svcs.deployClusterSingleton("mySingleton", new MyGridService());
+ * svcs.deployClusterSingleton("mySingleton", new MyIgniteService());
  * </pre>
  */
 public interface IgniteServices extends IgniteAsyncSupport {
     /**
-     * Gets the cluster group to which this {@code GridServices} instance belongs.
+     * Gets the cluster group to which this {@code IgniteServices} instance belongs.
      *
-     * @return Cluster group to which this {@code GridServices} instance belongs.
+     * @return Cluster group to which this {@code IgniteServices} instance belongs.
      */
     public ClusterGroup clusterGroup();
 
@@ -187,7 +187,7 @@
      * This method is analogous to the invocation of {@link #deploy(org.apache.ignite.services.ServiceConfiguration)} method
      * as follows:
      * <pre name="code" class="java">
-     *     GridServiceConfiguration cfg = new GridServiceConfiguration();
+     *     ServiceConfiguration cfg = new ServiceConfiguration();
      *
      *     cfg.setName(name);
      *     cfg.setService(svc);
@@ -196,7 +196,7 @@
      *     cfg.setTotalCount(1);
      *     cfg.setMaxPerNodeCount(1);
      *
-     *     grid.services().deploy(cfg);
+     *     ignite.services().deploy(cfg);
      * </pre>
      *
      * @param name Service name.
@@ -224,14 +224,14 @@
      * This method is analogous to the invocation of {@link #deploy(org.apache.ignite.services.ServiceConfiguration)} method
      * as follows:
      * <pre name="code" class="java">
-     *     GridServiceConfiguration cfg = new GridServiceConfiguration();
+     *     ServiceConfiguration cfg = new ServiceConfiguration();
      *
      *     cfg.setName(name);
      *     cfg.setService(svc);
      *     cfg.setTotalCount(totalCnt);
      *     cfg.setMaxPerNodeCount(maxPerNodeCnt);
      *
-     *     grid.services().deploy(cfg);
+     *     ignite.services().deploy(cfg);
      * </pre>
      *
      * @param name Service name.
@@ -266,14 +266,14 @@
      * <p>
      * Here is an example of creating service deployment configuration:
      * <pre name="code" class="java">
-     *     GridServiceConfiguration cfg = new GridServiceConfiguration();
+     *     ServiceConfiguration cfg = new ServiceConfiguration();
      *
      *     cfg.setName(name);
      *     cfg.setService(svc);
      *     cfg.setTotalCount(0); // Unlimited.
      *     cfg.setMaxPerNodeCount(2); // Deploy 2 instances of service on each node.
      *
-     *     grid.services().deploy(cfg);
+     *     ignite.services().deploy(cfg);
      * </pre>
      *
      * @param cfg Service configuration.
@@ -312,14 +312,14 @@
     public void cancelAll() throws IgniteException;
 
     /**
-     * Gets metadata about all deployed services.
+     * Gets metadata about all deployed services in the grid.
      *
-     * @return Metadata about all deployed services.
+     * @return Metadata about all deployed services in the grid.
      */
     public Collection<ServiceDescriptor> serviceDescriptors();
 
     /**
-     * Gets deployed service with specified name.
+     * Gets locally deployed service with specified name.
      *
      * @param name Service name.
      * @param <T> Service type
@@ -328,7 +328,7 @@
     public <T> T service(String name);
 
     /**
-     * Gets all deployed services with specified name.
+     * Gets all locally deployed services with specified name.
      *
      * @param name Service name.
      * @param <T> Service type.
@@ -352,4 +352,4 @@
 
     /** {@inheritDoc} */
     @Override public IgniteServices withAsync();
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/affinity/fair/FairAffinityFunction.java b/modules/core/src/main/java/org/apache/ignite/cache/affinity/fair/FairAffinityFunction.java
index 105efab..cf1cb02 100644
--- a/modules/core/src/main/java/org/apache/ignite/cache/affinity/fair/FairAffinityFunction.java
+++ b/modules/core/src/main/java/org/apache/ignite/cache/affinity/fair/FairAffinityFunction.java
@@ -331,7 +331,7 @@
                 balance(tier, pendingParts, fullMap, topSnapshot, true);
 
                 if (!exclNeighborsWarn) {
-                    LT.warn(log, null, "Affinity function excludeNeighbors property is ignored " +
+                    LT.warn(log, "Affinity function excludeNeighbors property is ignored " +
                         "because topology has no enough nodes to assign backups.");
 
                     exclNeighborsWarn = true;
@@ -354,6 +354,10 @@
 
     /** {@inheritDoc} */
     @Override public int partition(Object key) {
+        if (key == null)
+            throw new IllegalArgumentException("Null key is passed for a partition calculation. " +
+                "Make sure that an affinity key that is used is initialized properly.");
+
         return U.safeAbs(hash(key.hashCode())) % parts;
     }
 
@@ -1137,4 +1141,4 @@
             return "PartSet [nodeId=" + node.id() + ", size=" + parts.size() + ", parts=" + parts + ']';
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java b/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java
index ec12973..b5eabe1 100644
--- a/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java
+++ b/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java
@@ -440,7 +440,7 @@
             }
 
             if (!exclNeighborsWarn) {
-                LT.warn(log, null, "Affinity function excludeNeighbors property is ignored " +
+                LT.warn(log, "Affinity function excludeNeighbors property is ignored " +
                     "because topology has no enough nodes to assign backups.");
 
                 exclNeighborsWarn = true;
@@ -464,6 +464,10 @@
 
     /** {@inheritDoc} */
     @Override public int partition(Object key) {
+        if (key == null)
+            throw new IllegalArgumentException("Null key is passed for a partition calculation. " +
+                "Make sure that an affinity key that is used is initialized properly.");
+
         return U.safeAbs(key.hashCode() % parts);
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
index 1963509..57aab00 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
@@ -1000,7 +1000,7 @@
                     // at least one waiting request, then it is possible starvation.
                     if (exec.getPoolSize() == exec.getActiveCount() && completedCnt == lastCompletedCnt &&
                         !exec.getQueue().isEmpty())
-                        LT.warn(log, null, "Possible thread pool starvation detected (no task completed in last " +
+                        LT.warn(log, "Possible thread pool starvation detected (no task completed in last " +
                             interval + "ms, is executorService pool size large enough?)");
 
                     lastCompletedCnt = completedCnt;
@@ -2531,6 +2531,33 @@
         }
     }
 
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> createCaches(Collection<CacheConfiguration> cacheCfgs) {
+        A.notNull(cacheCfgs, "cacheCfgs");
+
+        guard();
+
+        try {
+            ctx.cache().dynamicStartCaches(cacheCfgs,
+                true,
+                true).get();
+
+            List<IgniteCache> createdCaches = new ArrayList<>(cacheCfgs.size());
+
+            for (CacheConfiguration cacheCfg : cacheCfgs)
+                createdCaches.add(ctx.cache().publicJCache(cacheCfg.getName()));
+
+            return createdCaches;
+        }
+        catch (IgniteCheckedException e) {
+            throw CU.convertToCacheException(e);
+        }
+        finally {
+            unguard();
+        }
+    }
+
     /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createCache(String cacheName) {
         guard();
@@ -2575,6 +2602,32 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> getOrCreateCaches(Collection<CacheConfiguration> cacheCfgs) {
+        A.notNull(cacheCfgs, "cacheCfgs");
+
+        guard();
+
+        try {
+            ctx.cache().dynamicStartCaches(cacheCfgs,
+                false,
+                true).get();
+
+            List<IgniteCache> createdCaches = new ArrayList<>(cacheCfgs.size());
+
+            for (CacheConfiguration cacheCfg : cacheCfgs)
+                createdCaches.add(ctx.cache().publicJCache(cacheCfg.getName()));
+
+            return createdCaches;
+        }
+        catch (IgniteCheckedException e) {
+            throw CU.convertToCacheException(e);
+        }
+        finally {
+            unguard();
+        }
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createCache(
         CacheConfiguration<K, V> cacheCfg,
         NearCacheConfiguration<K, V> nearCfg
@@ -2735,6 +2788,18 @@
         }
     }
 
+    /** {@inheritDoc} */
+    @Override public void destroyCaches(Collection<String> cacheNames) {
+        IgniteInternalFuture stopFut = destroyCachesAsync(cacheNames, true);
+
+        try {
+            stopFut.get();
+        }
+        catch (IgniteCheckedException e) {
+            throw CU.convertToCacheException(e);
+        }
+    }
+
     /**
      * @param cacheName Cache name.
      * @param checkThreadTx If {@code true} checks that current thread does not have active transactions.
@@ -2751,6 +2816,22 @@
         }
     }
 
+    /**
+     * @param cacheNames Collection of cache names.
+     * @param checkThreadTx If {@code true} checks that current thread does not have active transactions.
+     * @return Future that will be completed when all the caches are destroyed.
+     */
+    public IgniteInternalFuture<?> destroyCachesAsync(Collection<String> cacheNames, boolean checkThreadTx) {
+        guard();
+
+        try {
+            return ctx.cache().dynamicDestroyCaches(cacheNames, checkThreadTx);
+        }
+        finally {
+            unguard();
+        }
+    }
+
     /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> getOrCreateCache(String cacheName) {
         guard();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteServicesImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteServicesImpl.java
index b8042c3..c9d205b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteServicesImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteServicesImpl.java
@@ -36,7 +36,7 @@
 import org.jetbrains.annotations.Nullable;
 
 /**
- * {@link org.apache.ignite.IgniteCompute} implementation.
+ * {@link org.apache.ignite.IgniteServices} implementation.
  */
 public class IgniteServicesImpl extends AsyncSupportAdapter implements IgniteServices, Externalizable {
     /** */
@@ -289,4 +289,4 @@
     protected Object readResolve() throws ObjectStreamException {
         return prj.services();
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
index 5b2c3fc..b3a9eec 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
@@ -1640,6 +1640,8 @@
                 ensureMultiInstanceSupport(myCfg.getSwapSpaceSpi());
             }
 
+            validateThreadPoolSize(cfg.getPublicThreadPoolSize(), "public");
+
             execSvc = new IgniteThreadPoolExecutor(
                 "pub",
                 cfg.getGridName(),
@@ -1652,6 +1654,8 @@
 
             // Note that since we use 'LinkedBlockingQueue', number of
             // maximum threads has no effect.
+            validateThreadPoolSize(cfg.getSystemThreadPoolSize(), "system");
+
             sysExecSvc = new IgniteThreadPoolExecutor(
                 "sys",
                 cfg.getGridName(),
@@ -1666,6 +1670,8 @@
             // maximum threads has no effect.
             // Note, that we do not pre-start threads here as management pool may
             // not be needed.
+            validateThreadPoolSize(cfg.getManagementThreadPoolSize(), "management");
+
             mgmtExecSvc = new IgniteThreadPoolExecutor(
                 "mgmt",
                 cfg.getGridName(),
@@ -1680,6 +1686,7 @@
             // maximum threads has no effect.
             // Note, that we do not pre-start threads here as class loading pool may
             // not be needed.
+            validateThreadPoolSize(cfg.getPeerClassLoadingThreadPoolSize(), "peer class loading");
             p2pExecSvc = new IgniteThreadPoolExecutor(
                 "p2p",
                 cfg.getGridName(),
@@ -1691,6 +1698,8 @@
             p2pExecSvc.allowCoreThreadTimeOut(true);
 
             // Note that we do not pre-start threads here as igfs pool may not be needed.
+            validateThreadPoolSize(cfg.getIgfsThreadPoolSize(), "IGFS");
+
             igfsExecSvc = new IgniteThreadPoolExecutor(
                 cfg.getIgfsThreadPoolSize(),
                 cfg.getIgfsThreadPoolSize(),
@@ -1702,12 +1711,16 @@
             igfsExecSvc.allowCoreThreadTimeOut(true);
 
             // Note that we do not pre-start threads here as this pool may not be needed.
+            validateThreadPoolSize(cfg.getAsyncCallbackPoolSize(), "async callback");
+
             callbackExecSvc = new IgniteStripedThreadPoolExecutor(
                 cfg.getAsyncCallbackPoolSize(),
                 cfg.getGridName(),
                 "callback");
 
             if (myCfg.getConnectorConfiguration() != null) {
+                validateThreadPoolSize(myCfg.getConnectorConfiguration().getThreadPoolSize(), "connector");
+
                 restExecSvc = new IgniteThreadPoolExecutor(
                     "rest",
                     myCfg.getGridName(),
@@ -1720,6 +1733,8 @@
                 restExecSvc.allowCoreThreadTimeOut(true);
             }
 
+            validateThreadPoolSize(myCfg.getUtilityCacheThreadPoolSize(), "utility cache");
+
             utilityCacheExecSvc = new IgniteThreadPoolExecutor(
                 "utility",
                 cfg.getGridName(),
@@ -1730,6 +1745,8 @@
 
             utilityCacheExecSvc.allowCoreThreadTimeOut(true);
 
+            validateThreadPoolSize(myCfg.getMarshallerCacheThreadPoolSize(), "marshaller cache");
+
             marshCacheExecSvc = new IgniteThreadPoolExecutor(
                 "marshaller-cache",
                 cfg.getGridName(),
@@ -1838,6 +1855,19 @@
         }
 
         /**
+         * @param poolSize Actual thread pool size value taken from the configuration.
+         * @param poolName Name of the pool, e.g. 'management'.
+         * @throws IgniteCheckedException If the pool size is invalid (not greater than zero).
+         */
+        private static void validateThreadPoolSize(int poolSize, String poolName)
+            throws IgniteCheckedException {
+            if (poolSize <= 0) {
+                throw new IgniteCheckedException("Invalid " + poolName + " thread pool size" +
+                    " (must be greater than 0), actual value: " + poolSize);
+            }
+        }
+
+        /**
          * @param cfg Ignite configuration copy to.
          * @return New ignite configuration.
          * @throws IgniteCheckedException If failed.
@@ -2019,7 +2049,7 @@
             if (userCaches != null && userCaches.length > 0) {
                 if (!U.discoOrdered(cfg.getDiscoverySpi()) && !U.relaxDiscoveryOrdered())
                     throw new IgniteCheckedException("Discovery SPI implementation does not support node ordering and " +
-                        "cannot be used with cache (use SPI with @GridDiscoverySpiOrderSupport annotation, " +
+                        "cannot be used with cache (use SPI with @DiscoverySpiOrderSupport annotation, " +
                         "like TcpDiscoverySpi)");
 
                 for (CacheConfiguration ccfg : userCaches) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java
index e501d27..f3e368d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java
@@ -28,6 +28,7 @@
 import java.nio.channels.FileChannel;
 import java.nio.channels.FileLock;
 import java.nio.channels.OverlappingFileLockException;
+import java.nio.charset.StandardCharsets;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ThreadLocalRandom;
@@ -180,7 +181,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override protected String className(int id) throws IgniteCheckedException {
+    @Override public String className(int id) throws IgniteCheckedException {
         GridCacheAdapter<Integer, String> cache0 = cache;
 
         if (cache0 == null) {
@@ -209,7 +210,7 @@
 
                     assert fileLock != null : fileName;
 
-                    try (BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
+                    try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
                         clsName = reader.readLine();
                     }
                 }
@@ -298,7 +299,7 @@
 
                             assert fileLock != null : fileName;
 
-                            try (Writer writer = new OutputStreamWriter(out)) {
+                            try (Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
                                 writer.write(evt.getValue());
 
                                 writer.flush();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java
index 98d8c5e..aa5e63f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java
@@ -132,14 +132,7 @@
     /** {@inheritDoc} */
     @SuppressWarnings("unchecked")
     @Override public <K, V> ConcurrentMap<K, V> nodeLocalMap() {
-        guard();
-
-        try {
-            return nodeLoc;
-        }
-        finally {
-            unguard();
-        }
+        return nodeLoc;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java
index 5e30bf6..0bf8328 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java
@@ -317,7 +317,7 @@
                         if (ctx.localNodeId().equals(e.getKey())) {
                             // Warn only if mode is not CONTINUOUS.
                             if (meta.deploymentMode() != CONTINUOUS)
-                                LT.warn(log, null, "Local node is in participants (most probably, " +
+                                LT.warn(log, "Local node is in participants (most probably, " +
                                     "IgniteConfiguration.getPeerClassLoadingLocalClassPathExclude() " +
                                     "is not used properly " +
                                     "[locNodeId=" + ctx.localNodeId() + ", meta=" + meta + ']');
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
index d24f900..931d99a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
@@ -995,7 +995,7 @@
                 break;
 
             if (ctx.config().isWaitForSegmentOnStart()) {
-                LT.warn(log, null, "Failed to check network segment (retrying every 2000 ms).");
+                LT.warn(log, "Failed to check network segment (retrying every 2000 ms).");
 
                 // Wait and check again.
                 U.sleep(2000);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java
index 5b451a1..607bb96 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java
@@ -278,7 +278,7 @@
             int type = evt.type();
 
             if (!isRecordable(type)) {
-                LT.warn(log, null, "Trying to record event without checking if it is recordable: " +
+                LT.warn(log, "Trying to record event without checking if it is recordable: " +
                     U.gridEventName(type));
             }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
index 60800c0..66b71b4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
@@ -1718,6 +1718,14 @@
     }
 
     /**
+     * @return {@code True} if the value for the cache object has to be copied because
+     * of {@link CacheConfiguration#isCopyOnRead()}.
+     */
+    public boolean needValueCopy() {
+        return affNode && cacheCfg.isCopyOnRead() && cacheCfg.getMemoryMode() != OFFHEAP_VALUES;
+    }
+
+    /**
      * Converts temporary offheap object to heap-based.
      *
      * @param obj Object.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java
index 8e66233..ad4892b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java
@@ -399,7 +399,7 @@
                     ", daemon=" + daemon + ']');
 
             if (!daemon) {
-                LT.warn(log, null, "Ignoring deployment in PRIVATE or ISOLATED mode " +
+                LT.warn(log, "Ignoring deployment in PRIVATE or ISOLATED mode " +
                     "[sndId=" + sndId + ", ldrId=" + ldrId + ", userVer=" + userVer + ", mode=" + mode +
                     ", participants=" + participants + ", daemon=" + daemon + ']');
             }
@@ -408,7 +408,7 @@
         }
 
         if (mode != cctx.gridConfig().getDeploymentMode()) {
-            LT.warn(log, null, "Local and remote deployment mode mismatch (please fix configuration and restart) " +
+            LT.warn(log, "Local and remote deployment mode mismatch (please fix configuration and restart) " +
                 "[locDepMode=" + cctx.gridConfig().getDeploymentMode() + ", rmtDepMode=" + mode + ", rmtNodeId=" +
                 sndId + ']');
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java
index ec8b8cc..1c18738 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java
@@ -251,7 +251,7 @@
         assert key != null || type == EVT_CACHE_STARTED || type == EVT_CACHE_STOPPED;
 
         if (!cctx.events().isRecordable(type))
-            LT.warn(log, null, "Added event without checking if event is recordable: " + U.gridEventName(type));
+            LT.warn(log, "Added event without checking if event is recordable: " + U.gridEventName(type));
 
         // Events are not fired for internal entry.
         if (key == null || !key.internal()) {
@@ -261,7 +261,7 @@
                 evtNode = findNodeInHistory(evtNodeId);
 
             if (evtNode == null)
-                LT.warn(log, null, "Failed to find event node in grid topology history " +
+                LT.warn(log, "Failed to find event node in grid topology history " +
                     "(try to increase topology history size configuration property of configured " +
                     "discovery SPI): " + evtNodeId);
 
@@ -284,7 +284,7 @@
                     log.debug("Failed to unmarshall cache object value for the event notification: " + e);
 
                 if (!forceKeepBinary)
-                    LT.warn(log, null, "Failed to unmarshall cache object value for the event notification " +
+                    LT.warn(log, "Failed to unmarshall cache object value for the event notification " +
                         "(all further notifications will keep binary object format).");
 
                 forceKeepBinary = true;
@@ -351,7 +351,7 @@
         assert discoTs > 0;
 
         if (!cctx.events().isRecordable(type))
-            LT.warn(log, null, "Added event without checking if event is recordable: " + U.gridEventName(type));
+            LT.warn(log, "Added event without checking if event is recordable: " + U.gridEventName(type));
 
         cctx.gridEvents().record(new CacheRebalancingEvent(cctx.name(), cctx.localNode(),
             "Cache rebalancing event.", type, part, discoNode, discoType, discoTs));
@@ -364,7 +364,7 @@
      */
     public void addUnloadEvent(int part) {
         if (!cctx.events().isRecordable(EVT_CACHE_REBALANCE_PART_UNLOADED))
-            LT.warn(log, null, "Added event without checking if event is recordable: " +
+            LT.warn(log, "Added event without checking if event is recordable: " +
                 U.gridEventName(EVT_CACHE_REBALANCE_PART_UNLOADED));
 
         cctx.gridEvents().record(new CacheRebalancingEvent(cctx.name(), cctx.localNode(),
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
index fd6abbd..18ed9c5 100755
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
@@ -2283,99 +2283,92 @@
         if (checkThreadTx)
             checkEmptyTransactions();
 
-        DynamicCacheDescriptor desc = registeredCaches.get(maskNull(cacheName));
+        try {
+            DynamicCacheChangeRequest req = prepareCacheChangeRequest(
+                ccfg,
+                cacheName,
+                nearCfg,
+                cacheType,
+                failIfExists,
+                failIfNotStarted);
 
-        DynamicCacheChangeRequest req = new DynamicCacheChangeRequest(cacheName, ctx.localNodeId());
+            if (req != null)
+                return F.first(initiateCacheChanges(F.asList(req), failIfExists));
+            else
+                return new GridFinishedFuture<>();
+        }
+        catch (Exception e) {
+            return new GridFinishedFuture<>(e);
+        }
+    }
 
-        req.failIfExists(failIfExists);
+    /**
+     * Dynamically starts multiple caches.
+     *
+     * @param ccfgList Collection of cache configurations.
+     * @param failIfExists Fail if exists flag.
+     * @param checkThreadTx If {@code true} checks that current thread does not have active transactions.
+     * @return Future that will be completed when all caches are deployed.
+     */
+    public IgniteInternalFuture<?> dynamicStartCaches(
+        Collection<CacheConfiguration> ccfgList,
+        boolean failIfExists,
+        boolean checkThreadTx
+    ) {
+        return dynamicStartCaches(ccfgList, CacheType.USER, failIfExists, checkThreadTx);
+    }
 
-        if (ccfg != null) {
-            try {
-                cloneCheckSerializable(ccfg);
-            }
-            catch (IgniteCheckedException e) {
-                return new GridFinishedFuture<>(e);
-            }
+    /**
+     * Dynamically starts multiple caches.
+     *
+     * @param ccfgList Collection of cache configurations.
+     * @param cacheType Cache type.
+     * @param failIfExists Fail if exists flag.
+     * @param checkThreadTx If {@code true} checks that current thread does not have active transactions.
+     * @return Future that will be completed when all caches are deployed.
+     */
+    private IgniteInternalFuture<?> dynamicStartCaches(
+        Collection<CacheConfiguration> ccfgList,
+        CacheType cacheType,
+        boolean failIfExists,
+        boolean checkThreadTx
+    ) {
+        if (checkThreadTx)
+            checkEmptyTransactions();
 
-            if (desc != null) {
-                if (failIfExists) {
-                    return new GridFinishedFuture<>(new CacheExistsException("Failed to start cache " +
-                        "(a cache with the same name is already started): " + cacheName));
-                }
-                else {
-                    CacheConfiguration descCfg = desc.cacheConfiguration();
+        List<DynamicCacheChangeRequest> reqList = new ArrayList<>(ccfgList.size());
 
-                    // Check if we were asked to start a near cache.
-                    if (nearCfg != null) {
-                        if (CU.affinityNode(ctx.discovery().localNode(), descCfg.getNodeFilter())) {
-                            // If we are on a data node and near cache was enabled, return success, else - fail.
-                            if (descCfg.getNearConfiguration() != null)
-                                return new GridFinishedFuture<>();
-                            else
-                                return new GridFinishedFuture<>(new IgniteCheckedException("Failed to start near " +
-                                    "cache (local node is an affinity node for cache): " + cacheName));
-                        }
-                        else
-                            // If local node has near cache, return success.
-                            req.clientStartOnly(true);
-                    }
-                    else
-                        req.clientStartOnly(true);
+        try {
+            for (CacheConfiguration ccfg : ccfgList) {
+                DynamicCacheChangeRequest req = prepareCacheChangeRequest(
+                    ccfg,
+                    ccfg.getName(),
+                    null,
+                    cacheType,
+                    failIfExists,
+                    true
+                );
 
-                    req.deploymentId(desc.deploymentId());
-
-                    req.startCacheConfiguration(descCfg);
-                }
-            }
-            else {
-                req.deploymentId(IgniteUuid.randomUuid());
-
-                try {
-                    CacheConfiguration cfg = new CacheConfiguration(ccfg);
-
-                    CacheObjectContext cacheObjCtx = ctx.cacheObjects().contextForCache(cfg);
-
-                    initialize(false, cfg, cacheObjCtx);
-
-                    req.startCacheConfiguration(cfg);
-                }
-                catch (IgniteCheckedException e) {
-                    return new GridFinishedFuture(e);
-                }
+                if (req != null)
+                    reqList.add(req);
             }
         }
-        else {
-            req.clientStartOnly(true);
-
-            if (desc != null)
-                ccfg = desc.cacheConfiguration();
-
-            if (ccfg == null) {
-                if (failIfNotStarted)
-                    return new GridFinishedFuture<>(new CacheExistsException("Failed to start client cache " +
-                        "(a cache with the given name is not started): " + cacheName));
-                else
-                    return new GridFinishedFuture<>();
-            }
-
-            req.deploymentId(desc.deploymentId());
-            req.startCacheConfiguration(ccfg);
+        catch (Exception e) {
+            return new GridFinishedFuture<>(e);
         }
 
-        // Fail cache with swap enabled creation on grid without swap space SPI.
-        if (ccfg.isSwapEnabled())
-            for (ClusterNode n : ctx.discovery().allNodes())
-                if (!GridCacheUtils.clientNode(n) && !GridCacheUtils.isSwapEnabled(n))
-                    return new GridFinishedFuture<>(new IgniteCheckedException("Failed to start cache " +
-                        cacheName + " with swap enabled: Remote Node with ID " + n.id().toString().toUpperCase() +
-                        " has not swap SPI configured"));
+        if (!reqList.isEmpty()) {
+            GridCompoundFuture<?, ?> compoundFut = new GridCompoundFuture<>();
 
-        if (nearCfg != null)
-            req.nearCacheConfiguration(nearCfg);
+            for (DynamicCacheStartFuture fut : initiateCacheChanges(reqList, failIfExists))
+                compoundFut.add((IgniteInternalFuture)fut);
 
-        req.cacheType(cacheType);
+            compoundFut.markInitialized();
 
-        return F.first(initiateCacheChanges(F.asList(req), failIfExists));
+            return compoundFut;
+        }
+        else
+            return new GridFinishedFuture<>();
     }
 
     /**
@@ -2395,6 +2388,35 @@
     }
 
     /**
+     * @param cacheNames Collection of cache names to destroy.
+     * @param checkThreadTx If {@code true} checks that current thread does not have active transactions.
+     * @return Future that will be completed when all caches are destroyed.
+     */
+    public IgniteInternalFuture<?> dynamicDestroyCaches(Collection<String> cacheNames, boolean checkThreadTx) {
+        if (checkThreadTx)
+            checkEmptyTransactions();
+
+        List<DynamicCacheChangeRequest> reqs = new ArrayList<>(cacheNames.size());
+
+        for (String cacheName : cacheNames) {
+            DynamicCacheChangeRequest t = new DynamicCacheChangeRequest(cacheName, ctx.localNodeId());
+
+            t.stop(true);
+
+            reqs.add(t);
+        }
+
+        GridCompoundFuture<?, ?> compoundFut = new GridCompoundFuture<>();
+
+        for (DynamicCacheStartFuture fut : initiateCacheChanges(reqs, false))
+            compoundFut.add((IgniteInternalFuture)fut);
+
+        compoundFut.markInitialized();
+
+        return compoundFut;
+    }
+
+    /**
      * @param cacheName Cache name to close.
      * @return Future that will be completed when cache is closed.
      */
@@ -2415,6 +2437,7 @@
 
     /**
      * @param reqs Requests.
+     * @param failIfExists Fail if exists flag.
      * @return Collection of futures.
      */
     @SuppressWarnings("TypeMayBeWeakened")
@@ -3607,6 +3630,114 @@
     }
 
     /**
+     * Prepares DynamicCacheChangeRequest for cache creation.
+     *
+     * @param ccfg Cache configuration.
+     * @param cacheName Cache name.
+     * @param nearCfg Near cache configuration.
+     * @param cacheType Cache type.
+     * @param failIfExists Fail if exists flag.
+     * @param failIfNotStarted If {@code true} fails if cache is not started.
+     * @return Request or {@code null} if cache already exists.
+     * @throws IgniteCheckedException If some of the pre-checks failed.
+     * @throws CacheExistsException If cache exists and {@code failIfExists} flag is {@code true}.
+     */
+    private DynamicCacheChangeRequest prepareCacheChangeRequest(
+        @Nullable CacheConfiguration ccfg,
+        String cacheName,
+        @Nullable NearCacheConfiguration nearCfg,
+        CacheType cacheType,
+        boolean failIfExists,
+        boolean failIfNotStarted
+    ) throws IgniteCheckedException {
+        DynamicCacheDescriptor desc = registeredCaches.get(maskNull(cacheName));
+
+        DynamicCacheChangeRequest req = new DynamicCacheChangeRequest(cacheName, ctx.localNodeId());
+
+        req.failIfExists(failIfExists);
+
+        if (ccfg != null) {
+            cloneCheckSerializable(ccfg);
+
+            if (desc != null) {
+                if (failIfExists) {
+                    throw new CacheExistsException("Failed to start cache " +
+                        "(a cache with the same name is already started): " + cacheName);
+                }
+                else {
+                    CacheConfiguration descCfg = desc.cacheConfiguration();
+
+                    // Check if we were asked to start a near cache.
+                    if (nearCfg != null) {
+                        if (CU.affinityNode(ctx.discovery().localNode(), descCfg.getNodeFilter())) {
+                            // If we are on a data node and near cache was enabled, return success, else - fail.
+                            if (descCfg.getNearConfiguration() != null)
+                                return null;
+                            else
+                                throw new IgniteCheckedException("Failed to start near " +
+                                    "cache (local node is an affinity node for cache): " + cacheName);
+                        }
+                        else
+                            // If local node has near cache, return success.
+                            req.clientStartOnly(true);
+                    }
+                    else
+                        req.clientStartOnly(true);
+
+                    req.deploymentId(desc.deploymentId());
+
+                    req.startCacheConfiguration(descCfg);
+                }
+            }
+            else {
+                req.deploymentId(IgniteUuid.randomUuid());
+
+                CacheConfiguration cfg = new CacheConfiguration(ccfg);
+
+                CacheObjectContext cacheObjCtx = ctx.cacheObjects().contextForCache(cfg);
+
+                initialize(false, cfg, cacheObjCtx);
+
+                req.startCacheConfiguration(cfg);
+            }
+        }
+        else {
+            req.clientStartOnly(true);
+
+            if (desc != null)
+                ccfg = desc.cacheConfiguration();
+
+            if (ccfg == null) {
+                if (failIfNotStarted) {
+                    throw new CacheExistsException("Failed to start client cache " +
+                        "(a cache with the given name is not started): " + cacheName);
+                }
+                else
+                    return null;
+            }
+
+            req.deploymentId(desc.deploymentId());
+            req.startCacheConfiguration(ccfg);
+        }
+
+        // Fail cache with swap enabled creation on grid without swap space SPI.
+        if (ccfg.isSwapEnabled())
+            for (ClusterNode n : ctx.discovery().allNodes())
+                if (!GridCacheUtils.clientNode(n) && !GridCacheUtils.isSwapEnabled(n)) {
+                    throw new IgniteCheckedException("Failed to start cache " +
+                        cacheName + " with swap enabled: Remote Node with ID " + n.id().toString().toUpperCase() +
+                        " has not swap SPI configured");
+                }
+
+        if (nearCfg != null)
+            req.nearCacheConfiguration(nearCfg);
+
+        req.cacheType(cacheType);
+
+        return req;
+    }
+
+    /**
      * @param obj Object to clone.
      * @return Object copy.
      * @throws IgniteCheckedException If failed.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java
index fd0b471..5ada8dc 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSwapManager.java
@@ -138,6 +138,20 @@
             initOffHeap();
     }
 
+
+    /** {@inheritDoc} */
+    @Override protected void stop0(boolean cancel) {
+        if (offheapEnabled)
+            offheap.destruct(spaceName);
+
+        try {
+            clearSwap();
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to clear cache swap space", e);
+        }
+    }
+
     /**
      *
      */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java
index 7312386..c07a817 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java
@@ -426,10 +426,10 @@
     @SuppressWarnings("ForLoopReplaceableByForEach")
     private MiniFuture miniFuture(IgniteUuid miniId) {
         // We iterate directly over the futs collection here to avoid copy.
-        synchronized (futs) {
+        synchronized (sync) {
             // Avoid iterator creation.
-            for (int i = 0; i < futs.size(); i++) {
-                IgniteInternalFuture<Boolean> fut = futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                IgniteInternalFuture<Boolean> fut = future(i);
 
                 if (!isMini(fut))
                     continue;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetFuture.java
index 913580f..d2a3b3c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetFuture.java
@@ -29,7 +29,6 @@
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.NodeStoppingException;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
@@ -50,7 +49,6 @@
 import org.apache.ignite.internal.util.typedef.T2;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiClosure;
 import org.apache.ignite.lang.IgniteUuid;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
@@ -277,7 +275,7 @@
         // Optimization to avoid going through compound future,
         // if getAsync() has been completed and no other futures added to this
         // compound future.
-        if (fut.isDone() && futuresSize() == 0) {
+        if (fut.isDone() && futuresCount() == 0) {
             if (fut.error() != null)
                 onDone(fut.error());
             else
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java
index 39a3e08..3ce1dd8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java
@@ -124,9 +124,10 @@
     /**
      * @param cctx Context.
      * @param id Partition ID.
+     * @param entryFactory Entry factory.
      */
-    @SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor") GridDhtLocalPartition(GridCacheContext cctx, int id,
-        GridCacheMapEntryFactory entryFactory) {
+    @SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor")
+    GridDhtLocalPartition(GridCacheContext cctx, int id, GridCacheMapEntryFactory entryFactory) {
         assert cctx != null;
 
         this.id = id;
@@ -478,7 +479,7 @@
         shouldBeRenting = true;
 
         if ((reservations & 0xFFFF) == 0 && casState(reservations, RENTING)) {
-                shouldBeRenting = false;
+            shouldBeRenting = false;
 
             if (log.isDebugEnabled())
                 log.debug("Moved partition to RENTING state: " + this);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java
index b005b29..f2b5f49 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java
@@ -293,7 +293,7 @@
      * @return Entries.
      */
     public Collection<GridDhtCacheEntry> entriesCopy() {
-        synchronized (futs) {
+        synchronized (sync) {
             return new ArrayList<>(entries());
         }
     }
@@ -408,7 +408,7 @@
             return null;
         }
 
-        synchronized (futs) {
+        synchronized (sync) {
             entries.add(c == null || c.reentry() ? null : entry);
 
             if (c != null && !c.reentry())
@@ -542,10 +542,10 @@
     @SuppressWarnings("ForLoopReplaceableByForEach")
     private MiniFuture miniFuture(IgniteUuid miniId) {
         // We iterate directly over the futs collection here to avoid copy.
-        synchronized (futs) {
+        synchronized (sync) {
             // Avoid iterator creation.
-            for (int i = 0; i < futs.size(); i++) {
-                MiniFuture mini = (MiniFuture)futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                MiniFuture mini = (MiniFuture) future(i);
 
                 if (mini.futureId().equals(miniId)) {
                     if (!mini.isDone())
@@ -610,7 +610,7 @@
      * @param t Error.
      */
     public void onError(Throwable t) {
-        synchronized (futs) {
+        synchronized (sync) {
             if (err != null)
                 return;
 
@@ -657,7 +657,7 @@
             log.debug("Received onOwnerChanged() callback [entry=" + entry + ", owner=" + owner + "]");
 
         if (owner != null && owner.version().equals(lockVer)) {
-            synchronized (futs) {
+            synchronized (sync) {
                 if (!pendingLocks.remove(entry.key()))
                     return false;
             }
@@ -675,7 +675,7 @@
      * @return {@code True} if locks have been acquired.
      */
     private boolean checkLocks() {
-        synchronized (futs) {
+        synchronized (sync) {
             return pendingLocks.isEmpty();
         }
     }
@@ -708,7 +708,7 @@
         if (isDone() || (err == null && success && !checkLocks()))
             return false;
 
-        synchronized (futs) {
+        synchronized (sync) {
             if (this.err == null)
                 this.err = err;
         }
@@ -787,7 +787,7 @@
      * @param entries Entries.
      */
     private void map(Iterable<GridDhtCacheEntry> entries) {
-        synchronized (futs) {
+        synchronized (sync) {
             if (mapped)
                 return;
 
@@ -1119,13 +1119,13 @@
             if (log.isDebugEnabled())
                 log.debug("Timed out waiting for lock response: " + this);
 
-            synchronized (futs) {
+            synchronized (sync) {
                 timedOut = true;
 
                 // Stop locks and responses processing.
                 pendingLocks.clear();
 
-                futs.clear();
+                clear();
             }
 
             boolean releaseLocks = !(inTx() && cctx.tm().deadlockDetectionEnabled());
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
index 1dbda69..b2b4430 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
@@ -278,7 +278,7 @@
 
         boolean rmv;
 
-        synchronized (futs) {
+        synchronized (sync) {
             rmv = lockKeys.remove(entry.txKey());
         }
 
@@ -309,7 +309,7 @@
         if (!locksReady)
             return false;
 
-        synchronized (futs) {
+        synchronized (sync) {
             return lockKeys.isEmpty();
         }
     }
@@ -562,11 +562,11 @@
      */
     @SuppressWarnings("ForLoopReplaceableByForEach")
     private MiniFuture miniFuture(IgniteUuid miniId) {
-        synchronized (futs) {
-            // We iterate directly over the futs collection here to avoid copy.
+        // We iterate directly over the futs collection here to avoid copy.
+        synchronized (sync) {
             // Avoid iterator creation.
-            for (int i = 0; i < futs.size(); i++) {
-                IgniteInternalFuture<IgniteInternalTx> fut = futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                IgniteInternalFuture<IgniteInternalTx> fut = future(i);
 
                 if (!isMini(fut))
                     continue;
@@ -580,9 +580,9 @@
                         return null;
                 }
             }
-
-            return null;
         }
+
+        return null;
     }
 
     /**
@@ -620,7 +620,7 @@
             }
 
             if (tx.optimistic() && txEntry.explicitVersion() == null) {
-                synchronized (futs) {
+                synchronized (sync) {
                     lockKeys.add(txEntry.txKey());
                 }
             }
@@ -1341,7 +1341,7 @@
 
                         MiniFuture fut = new MiniFuture(nearMapping.node().id(), null, nearMapping);
 
-                        add(fut);
+                        add(fut); // Append new future.
 
                         GridDhtTxPrepareRequest req = new GridDhtTxPrepareRequest(
                             futId,
@@ -1795,8 +1795,8 @@
 
         /** {@inheritDoc} */
         @Override public void onTimeout() {
-            synchronized (futs) {
-                futs.clear();
+            synchronized (sync) {
+                clear();
 
                 lockKeys.clear();
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java
index b0eea01..ddb6500 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java
@@ -444,7 +444,7 @@
      * @return Keys for which locks requested from remote nodes but response isn't received.
      */
     public Set<IgniteTxKey> requestedKeys() {
-        synchronized (futs) {
+        synchronized (sync) {
             if (timeoutObj != null && timeoutObj.requestedKeys != null)
                 return timeoutObj.requestedKeys;
 
@@ -481,10 +481,10 @@
     @SuppressWarnings({"ForLoopReplaceableByForEach", "IfMayBeConditional"})
     private MiniFuture miniFuture(IgniteUuid miniId) {
         // We iterate directly over the futs collection here to avoid copy.
-        synchronized (futs) {
+        synchronized (sync) {
             // Avoid iterator creation.
-            for (int i = 0; i < futs.size(); i++) {
-                IgniteInternalFuture<Boolean> fut = futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                IgniteInternalFuture<Boolean> fut = future(i);
 
                 if (!isMini(fut))
                     continue;
@@ -1331,10 +1331,10 @@
                 log.debug("Timed out waiting for lock response: " + this);
 
             if (inTx() && cctx.tm().deadlockDetectionEnabled()) {
-                synchronized (futs) {
+                synchronized (sync) {
                     requestedKeys = requestedKeys0();
 
-                    futs.clear(); // Stop response processing.
+                    clear(); // Stop response processing.
                 }
 
                 Set<IgniteTxKey> keys = new HashSet<>();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java
index 3d9b6ab..02f6cce 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java
@@ -490,7 +490,7 @@
      * @return Keys for which locks requested from remote nodes but response isn't received.
      */
     public Set<IgniteTxKey> requestedKeys() {
-        synchronized (futs) {
+        synchronized (sync) {
             if (timeoutObj != null && timeoutObj.requestedKeys != null)
                 return timeoutObj.requestedKeys;
 
@@ -527,10 +527,10 @@
     @SuppressWarnings({"ForLoopReplaceableByForEach", "IfMayBeConditional"})
     private MiniFuture miniFuture(IgniteUuid miniId) {
         // We iterate directly over the futs collection here to avoid copy.
-        synchronized (futs) {
+        synchronized (sync) {
             // Avoid iterator creation.
-            for (int i = 0; i < futs.size(); i++) {
-                IgniteInternalFuture<Boolean> fut = futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                IgniteInternalFuture<Boolean> fut = future(i);
 
                 if (!isMini(fut))
                     continue;
@@ -1427,10 +1427,10 @@
             timedOut = true;
 
             if (inTx() && cctx.tm().deadlockDetectionEnabled()) {
-                synchronized (futs) {
+                synchronized (sync) {
                     requestedKeys = requestedKeys0();
 
-                    futs.clear(); // Stop response processing.
+                    clear(); // Stop response processing.
                 }
 
                 Set<IgniteTxKey> keys = new HashSet<>();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java
index 4cbfb27..3676a3c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java
@@ -229,10 +229,10 @@
     @SuppressWarnings("ForLoopReplaceableByForEach")
     private MiniFuture miniFuture(IgniteUuid miniId) {
         // We iterate directly over the futs collection here to avoid copy.
-        synchronized (futs) {
+        synchronized (sync) {
             // Avoid iterator creation.
-            for (int i = 0; i < futs.size(); i++) {
-                IgniteInternalFuture<GridNearTxPrepareResponse> fut = futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                IgniteInternalFuture<GridNearTxPrepareResponse> fut = future(i);
 
                 if (!isMini(fut))
                     continue;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java
index 91cfbda..87c9e1d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java
@@ -201,9 +201,9 @@
      */
     @SuppressWarnings("ForLoopReplaceableByForEach")
     public Set<IgniteTxKey> requestedKeys() {
-        synchronized (futs) {
-            for (int i = 0; i < futs.size(); i++) {
-                IgniteInternalFuture<GridNearTxPrepareResponse> fut = futs.get(i);
+        synchronized (sync) {
+            for (int i = 0; i < futuresCount(); i++) {
+                IgniteInternalFuture<GridNearTxPrepareResponse> fut = future(i);
 
                 if (isMini(fut) && !fut.isDone()) {
                     MiniFuture miniFut = (MiniFuture)fut;
@@ -232,10 +232,10 @@
     @SuppressWarnings("ForLoopReplaceableByForEach")
     private MiniFuture miniFuture(IgniteUuid miniId) {
         // We iterate directly over the futs collection here to avoid copy.
-        synchronized (futs) {
+        synchronized (sync) {
             // Avoid iterator creation.
-            for (int i = 0; i < futs.size(); i++) {
-                IgniteInternalFuture<GridNearTxPrepareResponse> fut = futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                IgniteInternalFuture<GridNearTxPrepareResponse> fut = future(i);
 
                 if (!isMini(fut))
                     continue;
@@ -686,9 +686,9 @@
             if (keyLockFut != null)
                 keys = new HashSet<>(keyLockFut.lockKeys);
             else {
-                if (futs != null && !futs.isEmpty()) {
-                    for (int i = 0; i < futs.size(); i++) {
-                        IgniteInternalFuture<GridNearTxPrepareResponse> fut = futs.get(i);
+                synchronized (sync) {
+                    for (int i = 0; i < futuresCount(); i++) {
+                        IgniteInternalFuture<GridNearTxPrepareResponse> fut = future(i);
 
                         if (isMini(fut) && !fut.isDone()) {
                             MiniFuture miniFut = (MiniFuture)fut;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearPessimisticTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearPessimisticTxPrepareFuture.java
index 5c09398..01fb5fd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearPessimisticTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearPessimisticTxPrepareFuture.java
@@ -132,10 +132,10 @@
     @SuppressWarnings("ForLoopReplaceableByForEach")
     private MiniFuture miniFuture(IgniteUuid miniId) {
         // We iterate directly over the futs collection here to avoid copy.
-        synchronized (futs) {
+        synchronized (sync) {
             // Avoid iterator creation.
-            for (int i = 0; i < futs.size(); i++) {
-                MiniFuture mini = (MiniFuture)futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                MiniFuture mini = (MiniFuture)future(i);
 
                 if (mini.futureId().equals(miniId)) {
                     if (!mini.isDone())
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
index 46604c7..f14d747 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
@@ -200,9 +200,9 @@
         if (!isDone()) {
             FinishMiniFuture finishFut = null;
 
-            synchronized (futs) {
-                for (int i = 0; i < futs.size(); i++) {
-                    IgniteInternalFuture<IgniteInternalTx> fut = futs.get(i);
+            synchronized (sync) {
+                for (int i = 0; i < futuresCount(); i++) {
+                    IgniteInternalFuture<IgniteInternalTx> fut = future(i);
 
                     if (fut.getClass() == FinishMiniFuture.class) {
                         FinishMiniFuture f = (FinishMiniFuture)fut;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
index cd0c50f..024375e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
@@ -547,7 +547,7 @@
             return true;
         }
 
-        LT.warn(log, null, "Calling Cache.loadCache() method will have no effect, " +
+        LT.warn(log, "Calling Cache.loadCache() method will have no effect, " +
             "CacheConfiguration.getStore() is not defined for cache: " + cctx.namexx());
 
         return false;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheWriteBehindStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheWriteBehindStore.java
index 468945b..858d9a7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheWriteBehindStore.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheWriteBehindStore.java
@@ -705,7 +705,7 @@
             }
         }
         catch (Exception e) {
-            LT.warn(log, e, "Unable to update underlying store: " + store);
+            LT.error(log, e, "Unable to update underlying store: " + store);
 
             if (writeCache.sizex() > cacheCriticalSize || stopping.get()) {
                 for (Map.Entry<K, Entry<? extends K, ? extends  V>> entry : vals.entrySet()) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxHandler.java
index fbd8ce5..4b99079 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxHandler.java
@@ -686,7 +686,7 @@
 
         IgniteInternalFuture<IgniteInternalTx> fut = finish(nodeId, null, req);
 
-        assert req.txState() != null || fut.error() != null ||
+        assert req.txState() != null || (fut != null && fut.error() != null) ||
             (ctx.tm().tx(req.version()) == null && ctx.tm().nearTx(req.version()) == null);
 
         return fut;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java
index 50de328..dae50bf 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCacheLazyPlainVersionedEntry.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
  * Lazy plain versioned entry.
@@ -104,4 +105,9 @@
         return val;
     }
 
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridCacheLazyPlainVersionedEntry.class, this,
+            "super", super.toString(), "key", key(), "val", value());
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCachePlainVersionedEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCachePlainVersionedEntry.java
index dd682e9..c175e5a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCachePlainVersionedEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/version/GridCachePlainVersionedEntry.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.cache.version;
 
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
@@ -25,9 +26,11 @@
  */
 public class GridCachePlainVersionedEntry<K, V> implements GridCacheVersionedEntryEx<K, V> {
     /** Key. */
+    @GridToStringInclude
     protected K key;
 
     /** Value. */
+    @GridToStringInclude
     protected V val;
 
     /** TTL. */
@@ -125,4 +128,4 @@
     @Override public String toString() {
         return S.toString(GridCachePlainVersionedEntry.class, this);
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/clock/GridClockSyncProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/clock/GridClockSyncProcessor.java
index b5c89cf..0764316 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/clock/GridClockSyncProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/clock/GridClockSyncProcessor.java
@@ -458,7 +458,7 @@
                     srv.sendPacket(req, addr, port);
                 }
                 catch (IgniteCheckedException e) {
-                    LT.warn(log, e, "Failed to send time request to remote node [rmtNodeId=" + rmtNodeId +
+                    LT.error(log, e, "Failed to send time request to remote node [rmtNodeId=" + rmtNodeId +
                         ", addr=" + addr + ", port=" + port + ']');
                 }
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheQueueAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheQueueAdapter.java
index caf3ba3..6e087e6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheQueueAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheQueueAdapter.java
@@ -834,7 +834,7 @@
                 return hdr.head();
             }
 
-            long next = hdr.head() + 1;
+            long next = hdr.head();
 
             rmvdIdxs = new HashSet<>(rmvdIdxs);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFragmentizerManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFragmentizerManager.java
index d64c64a..2e82f33 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFragmentizerManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFragmentizerManager.java
@@ -17,6 +17,19 @@
 
 package org.apache.ignite.internal.processors.igfs;
 
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.cluster.ClusterNode;
@@ -41,20 +54,6 @@
 import org.apache.ignite.thread.IgniteThread;
 import org.jetbrains.annotations.Nullable;
 
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.ignite.events.EventType.EVT_NODE_FAILED;
 import static org.apache.ignite.events.EventType.EVT_NODE_LEFT;
@@ -383,7 +382,7 @@
                 }
                 catch (IgniteCheckedException | IgniteException e) {
                     if (!X.hasCause(e, InterruptedException.class) && !X.hasCause(e, IgniteInterruptedCheckedException.class))
-                        LT.warn(log, e, "Failed to get fragmentizer file info (will retry).");
+                        LT.error(log, e, "Failed to get fragmentizer file info (will retry).");
                     else {
                         if (log.isDebugEnabled())
                             log.debug("Got interrupted exception in fragmentizer coordinator (grid is stopping).");
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java
index 1c985c0..ab4ee85 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java
@@ -17,6 +17,21 @@
 
 package org.apache.ignite.internal.processors.igfs;
 
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
@@ -81,22 +96,6 @@
 import org.jetbrains.annotations.Nullable;
 import org.jsr166.ConcurrentHashMap8;
 
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
 import static org.apache.ignite.events.EventType.EVT_IGFS_DIR_DELETED;
 import static org.apache.ignite.events.EventType.EVT_IGFS_FILE_DELETED;
 import static org.apache.ignite.events.EventType.EVT_IGFS_FILE_OPENED_READ;
@@ -1308,7 +1307,7 @@
                         secondarySpaceSize = secondaryFs.usedSpaceSize();
                     }
                     catch (IgniteException e) {
-                        LT.warn(log, e, "Failed to get secondary file system consumed space size.");
+                        LT.error(log, e, "Failed to get secondary file system consumed space size.");
 
                         secondarySpaceSize = -1;
                     }
@@ -1841,4 +1840,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/nodevalidation/OsDiscoveryNodeValidationProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/nodevalidation/OsDiscoveryNodeValidationProcessor.java
index a7e06e9..37e59bc 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/nodevalidation/OsDiscoveryNodeValidationProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/nodevalidation/OsDiscoveryNodeValidationProcessor.java
@@ -58,7 +58,7 @@
                     ", rmtNodeAddrs=" + U.addressesAsString(node) +
                     ", locNodeId=" + locNode.id() + ", rmtNodeId=" + node.id() + ']';
 
-                LT.warn(log, null, errMsg);
+                LT.warn(log, errMsg);
 
                 // Always output in debug.
                 if (log.isDebugEnabled())
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/offheap/GridOffHeapProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/offheap/GridOffHeapProcessor.java
index b91e9ab..47407f9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/offheap/GridOffHeapProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/offheap/GridOffHeapProcessor.java
@@ -78,6 +78,20 @@
             old.destruct();
     }
 
+    /**
+     * Destructs offheap map for given space name.
+     *
+     * @param spaceName Space name.
+     */
+    public void destruct(@Nullable String spaceName) {
+        spaceName = maskNull(spaceName);
+
+        GridOffHeapPartitionedMap map = offheap.remove(spaceName);
+
+        if (map != null)
+            map.destruct();
+    }
+
     /** {@inheritDoc} */
     @Override public void stop(boolean cancel) throws IgniteCheckedException {
         super.stop(cancel);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformConfigurationEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformConfigurationEx.java
index 97f0866..8f7c93b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformConfigurationEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformConfigurationEx.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.platform;
 
+import org.apache.ignite.internal.logger.platform.PlatformLogger;
 import org.apache.ignite.internal.processors.platform.cache.PlatformCacheExtension;
 import org.apache.ignite.internal.logger.platform.PlatformLogger;
 import org.apache.ignite.internal.processors.platform.callback.PlatformCallbackGateway;
@@ -50,12 +51,12 @@
     public Collection<String> warnings();
 
     /**
-     * @return Available cache extensions.
-     */
-    @Nullable public Collection<PlatformCacheExtension> cacheExtensions();
-
-    /**
      * @return Platform logger.
      */
     public PlatformLogger logger();
+
+    /**
+     * @return Available cache extensions.
+     */
+    @Nullable public Collection<PlatformCacheExtension> cacheExtensions();
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cpp/PlatformCppConfigurationEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cpp/PlatformCppConfigurationEx.java
index 785c9bd..4f6bb2d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cpp/PlatformCppConfigurationEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cpp/PlatformCppConfigurationEx.java
@@ -77,12 +77,12 @@
     }
 
     /** {@inheritDoc} */
-    @Override @Nullable public Collection<PlatformCacheExtension> cacheExtensions() {
+    @Override public PlatformLogger logger() {
         return null;
     }
 
     /** {@inheritDoc} */
-    @Override public PlatformLogger logger() {
+    @Override @Nullable public Collection<PlatformCacheExtension> cacheExtensions() {
         return null;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetConfigurationEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetConfigurationEx.java
index eded0e7..8448733 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetConfigurationEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetConfigurationEx.java
@@ -21,14 +21,15 @@
 import org.apache.ignite.internal.processors.platform.PlatformConfigurationEx;
 import org.apache.ignite.internal.processors.platform.cache.PlatformCacheExtension;
 import org.apache.ignite.internal.processors.platform.callback.PlatformCallbackGateway;
+import org.apache.ignite.internal.processors.platform.entityframework.PlatformDotNetEntityFrameworkCacheExtension;
 import org.apache.ignite.internal.processors.platform.memory.PlatformMemoryManagerImpl;
 import org.apache.ignite.internal.processors.platform.utils.PlatformUtils;
 import org.apache.ignite.internal.processors.platform.websession.PlatformDotNetSessionCacheExtension;
 import org.apache.ignite.platform.dotnet.PlatformDotNetConfiguration;
 import org.jetbrains.annotations.Nullable;
 
+import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 
 /**
  * Extended .Net configuration.
@@ -84,7 +85,12 @@
 
     /** {@inheritDoc} */
     @Nullable @Override public Collection<PlatformCacheExtension> cacheExtensions() {
-        return Collections.<PlatformCacheExtension>singleton(new PlatformDotNetSessionCacheExtension());
+        Collection<PlatformCacheExtension> exts = new ArrayList<>(2);
+
+        exts.add(new PlatformDotNetSessionCacheExtension());
+        exts.add(new PlatformDotNetEntityFrameworkCacheExtension());
+
+        return exts;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheEntry.java
new file mode 100644
index 0000000..676b411
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheEntry.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.platform.entityframework;
+
+import org.apache.ignite.binary.BinaryObjectException;
+import org.apache.ignite.binary.BinaryRawReader;
+import org.apache.ignite.binary.BinaryRawWriter;
+import org.apache.ignite.binary.BinaryReader;
+import org.apache.ignite.binary.BinaryWriter;
+import org.apache.ignite.binary.Binarylizable;
+
+/**
+ * EntityFramework cache entry.
+ */
+public class PlatformDotNetEntityFrameworkCacheEntry implements Binarylizable {
+    /** Dependent entity set names. */
+    private String[] entitySets;
+
+    /** Cached data bytes. */
+    private byte[] data;
+
+    /**
+     * Ctor.
+     */
+    public PlatformDotNetEntityFrameworkCacheEntry() {
+        // No-op.
+    }
+
+    /**
+     * Ctor.
+     *
+     * @param entitySets Entity set names.
+     * @param data Data bytes.
+     */
+    PlatformDotNetEntityFrameworkCacheEntry(String[] entitySets, byte[] data) {
+        this.entitySets = entitySets;
+        this.data = data;
+    }
+
+    /**
+     * @return Dependent entity set names.
+     */
+    public String[] entitySets() {
+        return entitySets;
+    }
+
+    /**
+     * @return Cached data bytes.
+     */
+    public byte[] data() {
+        return data;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeBinary(BinaryWriter writer) throws BinaryObjectException {
+        final BinaryRawWriter raw = writer.rawWriter();
+
+        if (entitySets != null) {
+            raw.writeInt(entitySets.length);
+
+            for (String entitySet : entitySets)
+                raw.writeString(entitySet);
+        }
+        else
+            raw.writeInt(-1);
+
+        raw.writeByteArray(data);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readBinary(BinaryReader reader) throws BinaryObjectException {
+        BinaryRawReader raw = reader.rawReader();
+
+        int cnt = raw.readInt();
+
+        if (cnt >= 0) {
+            entitySets = new String[cnt];
+
+            for (int i = 0; i < cnt; i++)
+                entitySets[i] = raw.readString();
+        }
+        else
+            entitySets = null;
+
+        data = raw.readByteArray();
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheExtension.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheExtension.java
new file mode 100644
index 0000000..d4755de
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheExtension.java
@@ -0,0 +1,353 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.platform.entityframework;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteCompute;
+import org.apache.ignite.cache.CachePeekMode;
+import org.apache.ignite.cluster.ClusterGroup;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.binary.BinaryRawReaderEx;
+import org.apache.ignite.internal.processors.platform.cache.PlatformCache;
+import org.apache.ignite.internal.processors.platform.cache.PlatformCacheExtension;
+import org.apache.ignite.internal.processors.platform.memory.PlatformMemory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgniteRunnable;
+import org.apache.ignite.resources.IgniteInstanceResource;
+
+import javax.cache.Cache;
+import javax.cache.processor.EntryProcessorResult;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * EntityFramework cache extension.
+ */
+@SuppressWarnings("unchecked")
+public class PlatformDotNetEntityFrameworkCacheExtension implements PlatformCacheExtension {
+    /** Extension ID. */
+    private static final int EXT_ID = 1;
+
+    /** Operation: increment entity set versions. */
+    private static final int OP_INVALIDATE_SETS = 1;
+
+    /** Operation: put item async. */
+    private static final int OP_PUT_ITEM = 2;
+
+    /** Operation: get item. */
+    private static final int OP_GET_ITEM = 3;
+
+    /** Cache key for cleanup node ID. */
+    private static final CleanupNodeId CLEANUP_NODE_ID = new CleanupNodeId();
+
+    /** Indicates whether local cleanup is in progress, per cache name. */
+    private final Map<String, Boolean> cleanupFlags = new ConcurrentHashMap<>();
+
+    /** {@inheritDoc} */
+    @Override public int id() {
+        return EXT_ID;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public long processInOutStreamLong(PlatformCache target, int type, BinaryRawReaderEx reader,
+        PlatformMemory mem) throws IgniteCheckedException {
+        switch (type) {
+            case OP_INVALIDATE_SETS: {
+                final IgniteCache<String, Long> metaCache = target.rawCache();
+                final String dataCacheName = reader.readString();
+
+                int cnt = reader.readInt();
+
+                assert cnt > 0;
+
+                final Set<String> entitySetNames = new HashSet<>(cnt);
+
+                for (int i = 0; i < cnt; i++)
+                    entitySetNames.add(reader.readString());
+
+                final Map<String, EntryProcessorResult<Long>> curVers =
+                    metaCache.invokeAll(entitySetNames, new PlatformDotNetEntityFrameworkIncreaseVersionProcessor());
+
+                if (curVers.size() != cnt)
+                    throw new IgniteCheckedException("Failed to update entity set versions [expected=" + cnt +
+                        ", actual=" + curVers.size() + ']');
+
+                Ignite grid = target.platformContext().kernalContext().grid();
+
+                startBackgroundCleanup(grid, (IgniteCache<CleanupNodeId, UUID>)(IgniteCache)metaCache,
+                    dataCacheName, curVers);
+
+                return target.writeResult(mem, null);
+            }
+
+            case OP_PUT_ITEM: {
+                String query = reader.readString();
+
+                long[] versions = null;
+                String[] entitySets = null;
+
+                int cnt = reader.readInt();
+
+                if (cnt >= 0) {
+                    versions = new long[cnt];
+                    entitySets = new String[cnt];
+
+                    for (int i = 0; i < cnt; i++) {
+                        versions[i] = reader.readLong();
+                        entitySets[i] = reader.readString();
+                    }
+                }
+
+                byte[] data = reader.readByteArray();
+
+                PlatformDotNetEntityFrameworkCacheEntry efEntry =
+                    new PlatformDotNetEntityFrameworkCacheEntry(entitySets, data);
+
+                IgniteCache<PlatformDotNetEntityFrameworkCacheKey, PlatformDotNetEntityFrameworkCacheEntry> dataCache
+                    = target.rawCache();
+
+                PlatformDotNetEntityFrameworkCacheKey key = new PlatformDotNetEntityFrameworkCacheKey(query, versions);
+
+                dataCache.put(key, efEntry);
+
+                return target.writeResult(mem, null);
+            }
+
+            case OP_GET_ITEM: {
+                String query = reader.readString();
+
+                long[] versions = null;
+
+                int cnt = reader.readInt();
+
+                if (cnt >= 0) {
+                    versions = new long[cnt];
+
+                    for (int i = 0; i < cnt; i++)
+                        versions[i] = reader.readLong();
+                }
+
+                IgniteCache<PlatformDotNetEntityFrameworkCacheKey, PlatformDotNetEntityFrameworkCacheEntry> dataCache
+                    = target.rawCache();
+
+                PlatformDotNetEntityFrameworkCacheKey key = new PlatformDotNetEntityFrameworkCacheKey(query, versions);
+
+                PlatformDotNetEntityFrameworkCacheEntry entry = dataCache.get(key);
+
+                byte[] data = entry == null ? null : entry.data();
+
+                return target.writeResult(mem, data);
+            }
+        }
+
+        throw new IgniteCheckedException("Unsupported operation type: " + type);
+    }
+
+    /**
+     * Starts the background cleanup of old cache entries.
+     *
+     * @param grid Grid.
+     * @param metaCache Meta cache.
+     * @param dataCacheName Data cache name.
+     * @param currentVersions Current versions.
+     */
+    private void startBackgroundCleanup(Ignite grid, final Cache<CleanupNodeId, UUID> metaCache,
+        final String dataCacheName, final Map<String, EntryProcessorResult<Long>> currentVersions) {
+        if (cleanupFlags.containsKey(dataCacheName))
+            return;  // Current node already performs cleanup.
+
+        if (!trySetGlobalCleanupFlag(grid, metaCache))
+            return;
+
+        cleanupFlags.put(dataCacheName, true);
+
+        final ClusterGroup dataNodes = grid.cluster().forDataNodes(dataCacheName);
+
+        IgniteCompute asyncCompute = grid.compute(dataNodes).withAsync();
+
+        asyncCompute.broadcast(new RemoveOldEntriesRunnable(dataCacheName, currentVersions));
+
+        asyncCompute.future().listen(new CleanupCompletionListener(metaCache, dataCacheName));
+    }
+
+    /**
+     * Tries to set the global cleanup node id to current node.
+     *
+     * @param grid Grid.
+     * @param metaCache Meta cache.
+     *
+     * @return True if successfully set the flag indicating that current node performs the cleanup; otherwise false.
+     */
+    private boolean trySetGlobalCleanupFlag(Ignite grid, final Cache<CleanupNodeId, UUID> metaCache) {
+        final UUID localNodeId = grid.cluster().localNode().id();
+
+        while (true) {
+            // Get the node performing cleanup.
+            UUID nodeId = metaCache.get(CLEANUP_NODE_ID);
+
+            if (nodeId == null) {
+                if (metaCache.putIfAbsent(CLEANUP_NODE_ID, localNodeId))
+                    return true;  // Successfully reserved cleanup to local node.
+
+                // Failed putIfAbsent: someone else may have started cleanup. Retry the check.
+                continue;
+            }
+
+            if (nodeId.equals(localNodeId))
+                return false;  // Current node already performs cleanup.
+
+            if (grid.cluster().node(nodeId) != null)
+                return false;  // Another node already performs cleanup and is alive.
+
+            // Node that performs cleanup has disconnected.
+            if (metaCache.replace(CLEANUP_NODE_ID, nodeId, localNodeId))
+                return true;  // Successfully replaced disconnected node id with our id.
+
+            // Replace failed: someone else started cleanup.
+            return false;
+        }
+    }
+
+    /**
+     * Removes old cache entries locally.
+     *
+     * @param ignite Ignite.
+     * @param dataCacheName Cache name.
+     * @param currentVersions Current versions.
+     */
+    private static void removeOldEntries(final Ignite ignite, final String dataCacheName,
+        final Map<String, EntryProcessorResult<Long>> currentVersions) {
+
+        IgniteCache<PlatformDotNetEntityFrameworkCacheKey, PlatformDotNetEntityFrameworkCacheEntry> cache =
+            ignite.cache(dataCacheName);
+
+        Set<PlatformDotNetEntityFrameworkCacheKey> keysToRemove = new TreeSet<>();
+
+        ClusterNode localNode = ignite.cluster().localNode();
+
+        for (Cache.Entry<PlatformDotNetEntityFrameworkCacheKey, PlatformDotNetEntityFrameworkCacheEntry> cacheEntry :
+            cache.localEntries(CachePeekMode.ALL)) {
+            // Check if we are on a primary node for the key, since we use CachePeekMode.ALL
+            // and we don't want to process backup entries.
+            if (!ignite.affinity(dataCacheName).isPrimary(localNode, cacheEntry.getKey()))
+                continue;
+
+            long[] versions = cacheEntry.getKey().versions();
+            String[] entitySets = cacheEntry.getValue().entitySets();
+
+            for (int i = 0; i < entitySets.length; i++) {
+                EntryProcessorResult<Long> curVer = currentVersions.get(entitySets[i]);
+
+                if (curVer != null && versions[i] < curVer.get())
+                    keysToRemove.add(cacheEntry.getKey());
+            }
+        }
+
+        cache.removeAll(keysToRemove);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(PlatformDotNetEntityFrameworkCacheExtension.class, this);
+    }
+
+    /**
+     * Cache key for cleanup node id.
+     */
+    private static class CleanupNodeId {
+        // No-op.
+    }
+
+    /**
+     * Old entries remover.
+     */
+    private static class RemoveOldEntriesRunnable implements IgniteRunnable {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** */
+        private final String dataCacheName;
+
+        /** */
+        private final Map<String, EntryProcessorResult<Long>> currentVersions;
+
+        /** Inject Ignite. */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /**
+         * Ctor.
+         *
+         * @param dataCacheName Name of the cache to clean up.
+         * @param currentVersions Map of current entity set versions.
+         */
+        private RemoveOldEntriesRunnable(String dataCacheName,
+            Map<String, EntryProcessorResult<Long>> currentVersions) {
+            this.dataCacheName = dataCacheName;
+            this.currentVersions = currentVersions;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            removeOldEntries(ignite, dataCacheName, currentVersions);
+        }
+    }
+
+    /**
+     * Cleanup completion listener.
+     */
+    private class CleanupCompletionListener implements IgniteInClosure<IgniteFuture<Object>> {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** */
+        private final Cache<CleanupNodeId, UUID> metaCache;
+
+        /** */
+        private final String dataCacheName;
+
+        /**
+         * Ctor.
+         *
+         * @param metaCache Metadata cache.
+         * @param dataCacheName Data cache name.
+         */
+        private CleanupCompletionListener(Cache<CleanupNodeId, UUID> metaCache, String dataCacheName) {
+            this.metaCache = metaCache;
+            this.dataCacheName = dataCacheName;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void apply(IgniteFuture<Object> future) {
+            // Reset distributed cleanup flag.
+            metaCache.remove(CLEANUP_NODE_ID);
+
+            // Reset local cleanup flag.
+            cleanupFlags.remove(dataCacheName);
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheKey.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheKey.java
new file mode 100644
index 0000000..60fdaec
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkCacheKey.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.platform.entityframework;
+
+import org.apache.ignite.binary.BinaryObjectException;
+import org.apache.ignite.binary.BinaryRawReader;
+import org.apache.ignite.binary.BinaryRawWriter;
+import org.apache.ignite.binary.BinaryReader;
+import org.apache.ignite.binary.BinaryWriter;
+import org.apache.ignite.binary.Binarylizable;
+import org.jetbrains.annotations.NotNull;
+
+import java.util.Arrays;
+
+/**
+ * EntityFramework cache key: query + versions.
+ */
+@SuppressWarnings("WeakerAccess")
+public class PlatformDotNetEntityFrameworkCacheKey
+    implements Binarylizable, Comparable<PlatformDotNetEntityFrameworkCacheKey> {
+    /** Query text. */
+    private String query;
+
+    /** Entity set versions. */
+    private long[] versions;
+
+    /**
+     * Ctor.
+     */
+    public PlatformDotNetEntityFrameworkCacheKey() {
+        // No-op.
+    }
+
+    /**
+     * Ctor.
+     *
+     * @param query Query text.
+     * @param versions Versions.
+     */
+    PlatformDotNetEntityFrameworkCacheKey(String query, long[] versions) {
+        assert query != null;
+
+        this.query = query;
+        this.versions = versions;
+    }
+
+    /**
+     * Gets the query text.
+     *
+     * @return Query text.
+     */
+    public String query() {
+        return query;
+    }
+
+    /**
+     * Gets the entity set versions.
+     *
+     * @return Entity set versions.
+     */
+    public long[] versions() {
+        return versions;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        PlatformDotNetEntityFrameworkCacheKey key = (PlatformDotNetEntityFrameworkCacheKey)o;
+
+        //noinspection SimplifiableIfStatement
+        if (!query.equals(key.query))
+            return false;
+
+        return Arrays.equals(versions, key.versions);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int result = query.hashCode();
+
+        result = 31 * result + Arrays.hashCode(versions);
+
+        return result;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeBinary(BinaryWriter writer) throws BinaryObjectException {
+        final BinaryRawWriter raw = writer.rawWriter();
+
+        raw.writeString(query);
+
+        if (versions != null) {
+            raw.writeInt(versions.length);
+
+            for (long ver : versions)
+                raw.writeLong(ver);
+        }
+        else
+            raw.writeInt(-1);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readBinary(BinaryReader reader) throws BinaryObjectException {
+        BinaryRawReader raw = reader.rawReader();
+
+        query = raw.readString();
+
+        int cnt = raw.readInt();
+
+        if (cnt >= 0) {
+            versions = new long[cnt];
+
+            for (int i = 0; i < cnt; i++)
+                versions[i] = raw.readLong();
+        }
+        else
+            versions = null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int compareTo(@NotNull PlatformDotNetEntityFrameworkCacheKey o) {
+        int cmpQuery = query.compareTo(o.query);
+
+        if (cmpQuery != 0)
+            return cmpQuery;
+
+        if (versions == null) {
+            return o.versions == null ? 0 : -1;
+        }
+
+        if (o.versions == null)
+            return 1;
+
+        assert versions.length == o.versions.length;
+
+        for (int i = 0; i < versions.length; i++) {
+            if (versions[i] != o.versions[i]) {
+                return versions[i] > o.versions[i] ? 1 : -1;
+            }
+        }
+
+        return 0;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkIncreaseVersionProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkIncreaseVersionProcessor.java
new file mode 100644
index 0000000..f10138a
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/entityframework/PlatformDotNetEntityFrameworkIncreaseVersionProcessor.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.platform.entityframework;
+
+import org.apache.ignite.cache.CacheEntryProcessor;
+
+import javax.cache.processor.EntryProcessorException;
+import javax.cache.processor.MutableEntry;
+
+/**
+ * Entry processor that increments entity set version number.
+ */
+public class PlatformDotNetEntityFrameworkIncreaseVersionProcessor implements CacheEntryProcessor<String, Long, Long> {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** {@inheritDoc} */
+    @Override public Long process(MutableEntry<String, Long> entry, Object... args) throws EntryProcessorException {
+        Long val = entry.getValue();
+
+        if (val == null)
+            val = 0L;
+
+        val++;
+
+        entry.setValue(val);
+
+        return val;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java
index 9480dae..83d1bf6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java
@@ -352,6 +352,7 @@
      * @param out Stream.
      * @param p Policy.
      */
+    @SuppressWarnings("TypeMayBeWeakened")
     private static void writeEvictionPolicy(BinaryRawWriter out, EvictionPolicy p) {
         if (p instanceof FifoEvictionPolicy) {
             out.writeByte((byte)1);
@@ -685,12 +686,10 @@
         assert writer != null;
         assert ccfg != null;
 
-        writer.writeInt(ccfg.getAtomicityMode() == null ?
-            CacheConfiguration.DFLT_CACHE_ATOMICITY_MODE.ordinal() : ccfg.getAtomicityMode().ordinal());
-        writer.writeInt(ccfg.getAtomicWriteOrderMode() == null ? 0 : ccfg.getAtomicWriteOrderMode().ordinal());
+        writeEnumInt(writer, ccfg.getAtomicityMode(), CacheConfiguration.DFLT_CACHE_ATOMICITY_MODE);
+        writeEnumInt(writer, ccfg.getAtomicWriteOrderMode());
         writer.writeInt(ccfg.getBackups());
-        writer.writeInt(ccfg.getCacheMode() == null ?
-            CacheConfiguration.DFLT_CACHE_MODE.ordinal() : ccfg.getCacheMode().ordinal());
+        writeEnumInt(writer, ccfg.getCacheMode(), CacheConfiguration.DFLT_CACHE_MODE);
         writer.writeBoolean(ccfg.isCopyOnRead());
         writer.writeBoolean(ccfg.isEagerTtl());
         writer.writeBoolean(ccfg.isSwapEnabled());
@@ -705,15 +704,13 @@
         writer.writeLong(ccfg.getLongQueryWarningTimeout());
         writer.writeInt(ccfg.getMaxConcurrentAsyncOperations());
         writer.writeFloat(ccfg.getEvictMaxOverflowRatio());
-        writer.writeInt(ccfg.getMemoryMode() == null ?
-            CacheConfiguration.DFLT_MEMORY_MODE.ordinal() : ccfg.getMemoryMode().ordinal());
+        writeEnumInt(writer, ccfg.getMemoryMode(), CacheConfiguration.DFLT_MEMORY_MODE);
         writer.writeString(ccfg.getName());
         writer.writeLong(ccfg.getOffHeapMaxMemory());
         writer.writeBoolean(ccfg.isReadFromBackup());
         writer.writeInt(ccfg.getRebalanceBatchSize());
         writer.writeLong(ccfg.getRebalanceDelay());
-        writer.writeInt(ccfg.getRebalanceMode() == null ?
-            CacheConfiguration.DFLT_REBALANCE_MODE.ordinal() : ccfg.getRebalanceMode().ordinal());
+        writeEnumInt(writer, ccfg.getRebalanceMode(), CacheConfiguration.DFLT_REBALANCE_MODE);
         writer.writeLong(ccfg.getRebalanceThrottle());
         writer.writeLong(ccfg.getRebalanceTimeout());
         writer.writeBoolean(ccfg.isSqlEscapeAll());
@@ -724,7 +721,7 @@
         writer.writeLong(ccfg.getWriteBehindFlushFrequency());
         writer.writeInt(ccfg.getWriteBehindFlushSize());
         writer.writeInt(ccfg.getWriteBehindFlushThreadCount());
-        writer.writeInt(ccfg.getWriteSynchronizationMode() == null ? 0 : ccfg.getWriteSynchronizationMode().ordinal());
+        writeEnumInt(writer, ccfg.getWriteSynchronizationMode());
         writer.writeBoolean(ccfg.isReadThrough());
         writer.writeBoolean(ccfg.isWriteThrough());
 
@@ -821,7 +818,7 @@
         assert index != null;
 
         writer.writeString(index.getName());
-        writer.writeByte((byte)index.getIndexType().ordinal());
+        writeEnumByte(writer, index.getIndexType());
 
         LinkedHashMap<String, Boolean> fields = index.getFields();
 
@@ -928,7 +925,7 @@
 
             w.writeInt(atomic.getAtomicSequenceReserveSize());
             w.writeInt(atomic.getBackups());
-            w.writeInt(atomic.getCacheMode().ordinal());
+            writeEnumInt(w, atomic.getCacheMode(), AtomicConfiguration.DFLT_CACHE_MODE);
         }
         else
             w.writeBoolean(false);
@@ -939,8 +936,8 @@
             w.writeBoolean(true);
 
             w.writeInt(tx.getPessimisticTxLogSize());
-            w.writeInt(tx.getDefaultTxConcurrency().ordinal());
-            w.writeInt(tx.getDefaultTxIsolation().ordinal());
+            writeEnumInt(w, tx.getDefaultTxConcurrency(), TransactionConfiguration.DFLT_TX_CONCURRENCY);
+            writeEnumInt(w, tx.getDefaultTxIsolation(), TransactionConfiguration.DFLT_TX_ISOLATION);
             w.writeLong(tx.getDefaultTxTimeout());
             w.writeInt(tx.getPessimisticTxLogLinger());
         }
@@ -1047,6 +1044,38 @@
     }
 
     /**
+     * Writes enum as byte.
+     *
+     * @param w Writer.
+     * @param e Enum.
+     */
+    private static void writeEnumByte(BinaryRawWriter w, Enum e) {
+        w.writeByte(e == null ? 0 : (byte)e.ordinal());
+    }
+
+    /**
+     * Writes enum as int.
+     *
+     * @param w Writer.
+     * @param e Enum.
+     */
+    private static void writeEnumInt(BinaryRawWriter w, Enum e) {
+        w.writeInt(e == null ? 0 : e.ordinal());
+    }
+
+    /**
+     * Writes enum as int.
+     *
+     * @param w Writer.
+     * @param e Enum.
+     */
+    private static void writeEnumInt(BinaryRawWriter w, Enum e, Enum def) {
+        assert def != null;
+
+        w.writeInt(e == null ? def.ordinal() : e.ordinal());
+    }
+
+    /**
      * Private constructor.
      */
     private PlatformConfigurationUtils() {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/query/QueryCommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/query/QueryCommandHandler.java
index 4317dd9..ee728a6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/query/QueryCommandHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/query/QueryCommandHandler.java
@@ -30,6 +30,7 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;
 import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.cache.query.Query;
 import org.apache.ignite.cache.query.QueryCursor;
@@ -217,22 +218,31 @@
         assert SUPPORTED_COMMANDS.contains(req.command());
         assert req instanceof RestQueryRequest : "Invalid type of query request.";
 
+        if (req.command() != CLOSE_SQL_QUERY) {
+            Integer pageSize = ((RestQueryRequest) req).pageSize();
+
+            if (pageSize == null)
+                return new GridFinishedFuture<>(
+                        new IgniteCheckedException(GridRestCommandHandlerAdapter.missingParameter("pageSize"))
+                );
+        }
+
         switch (req.command()) {
             case EXECUTE_SQL_QUERY:
             case EXECUTE_SQL_FIELDS_QUERY:
             case EXECUTE_SCAN_QUERY: {
                 return ctx.closure().callLocalSafe(
-                    new ExecuteQueryCallable(ctx, (RestQueryRequest)req, qryCurs), false);
+                        new ExecuteQueryCallable(ctx, (RestQueryRequest) req, qryCurs), false);
             }
 
             case FETCH_SQL_QUERY: {
                 return ctx.closure().callLocalSafe(
-                    new FetchQueryCallable((RestQueryRequest)req, qryCurs), false);
+                        new FetchQueryCallable((RestQueryRequest) req, qryCurs), false);
             }
 
             case CLOSE_SQL_QUERY: {
                 return ctx.closure().callLocalSafe(
-                    new CloseQueryCallable((RestQueryRequest)req, qryCurs), false);
+                        new CloseQueryCallable((RestQueryRequest) req, qryCurs), false);
             }
         }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/request/RestQueryRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/request/RestQueryRequest.java
index 7159c83..75c74db 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/request/RestQueryRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/request/RestQueryRequest.java
@@ -88,7 +88,7 @@
     /**
      * @return Page size.
      */
-    public int pageSize() {
+    public Integer pageSize() {
         return pageSize;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java
index 0be69d1..3478c70 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java
@@ -626,7 +626,7 @@
                 res.setOccupied(true);
 
                 if (resCache && jobRes.size() > ctx.discovery().size() && jobRes.size() % SPLIT_WARN_THRESHOLD == 0)
-                    LT.warn(log, null, "Number of jobs in task is too large for task: " + ses.getTaskName() +
+                    LT.warn(log, "Number of jobs in task is too large for task: " + ses.getTaskName() +
                         ". Consider reducing number of jobs or disabling job result cache with " +
                         "@ComputeTaskNoResultCache annotation.");
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/GridJavaProcess.java b/modules/core/src/main/java/org/apache/ignite/internal/util/GridJavaProcess.java
index 8a0b0ae..3f05e13 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/GridJavaProcess.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/GridJavaProcess.java
@@ -191,7 +191,10 @@
 
         killProc.waitFor();
 
-        assert killProc.exitValue() == 0 : "Process killing was not successful";
+        int exitVal = killProc.exitValue();
+
+        if (exitVal != 0)
+            log.info(String.format("Abnormal exit value of %s for pid %s", exitVal, pid));
 
         if (procKilledC != null)
             procKilledC.apply();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/GridLogThrottle.java b/modules/core/src/main/java/org/apache/ignite/internal/util/GridLogThrottle.java
index 745619a..c4a107a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/GridLogThrottle.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/GridLogThrottle.java
@@ -79,41 +79,38 @@
      * Logs warning if needed.
      *
      * @param log Logger.
-     * @param e Error (optional).
      * @param msg Message.
      */
-    public static void warn(@Nullable IgniteLogger log, @Nullable Throwable e, String msg) {
+    public static void warn(@Nullable IgniteLogger log, String msg) {
         assert !F.isEmpty(msg);
 
-        log(log, e, msg, null, LogLevel.WARN, false);
+        log(log, null, msg, null, LogLevel.WARN, false);
     }
 
     /**
      * Logs warning if needed.
      *
      * @param log Logger.
-     * @param e Error (optional).
      * @param msg Message.
-     * @param quite Print warning anyway.
+     * @param quiet Print warning anyway.
      */
-    public static void warn(@Nullable IgniteLogger log, @Nullable Throwable e, String msg, boolean quite) {
+    public static void warn(@Nullable IgniteLogger log, String msg, boolean quiet) {
         assert !F.isEmpty(msg);
 
-        log(log, e, msg, null, LogLevel.WARN, quite);
+        log(log, null, msg, null, LogLevel.WARN, quiet);
     }
 
     /**
      * Logs warning if needed.
      *
      * @param log Logger.
-     * @param e Error (optional).
      * @param longMsg Long message (or just message).
-     * @param shortMsg Short message for quite logging.
+     * @param shortMsg Short message for quiet logging.
      */
-    public static void warn(@Nullable IgniteLogger log, @Nullable Throwable e, String longMsg, @Nullable String shortMsg) {
+    public static void warn(@Nullable IgniteLogger log, String longMsg, @Nullable String shortMsg) {
         assert !F.isEmpty(longMsg);
 
-        log(log, e, longMsg, shortMsg, LogLevel.WARN, false);
+        log(log, null, longMsg, shortMsg, LogLevel.WARN, false);
     }
 
     /**
@@ -121,12 +118,12 @@
      *
      * @param log Logger.
      * @param msg Message.
-     * @param quite Print info anyway.
+     * @param quiet Print info anyway.
      */
-    public static void info(@Nullable IgniteLogger log, String msg, boolean quite) {
+    public static void info(@Nullable IgniteLogger log, String msg, boolean quiet) {
         assert !F.isEmpty(msg);
 
-        log(log, null, msg, null, LogLevel.INFO, quite);
+        log(log, null, msg, null, LogLevel.INFO, quiet);
     }
 
     /**
@@ -136,6 +133,8 @@
      * @param msg Message.
      */
     public static void info(@Nullable IgniteLogger log, String msg) {
+        assert !F.isEmpty(msg);
+
         info(log, msg, false);
     }
 
@@ -152,12 +151,12 @@
      * @param log Logger.
      * @param e Error (optional).
      * @param longMsg Long message (or just message).
-     * @param shortMsg Short message for quite logging.
+     * @param shortMsg Short message for quiet logging.
      * @param level Level where messages should appear.
      */
     @SuppressWarnings({"RedundantTypeArguments"})
-    private static void log(@Nullable IgniteLogger log, @Nullable Throwable e, String longMsg, @Nullable String shortMsg,
-        LogLevel level, boolean quiet) {
+    private static void log(@Nullable IgniteLogger log, @Nullable Throwable e, String longMsg,
+        @Nullable String shortMsg, LogLevel level, boolean quiet) {
         assert !F.isEmpty(longMsg);
 
         IgniteBiTuple<Class<? extends Throwable>, String> tup =
@@ -252,4 +251,4 @@
          */
         public abstract void doLog(IgniteLogger log, String longMsg, String shortMsg, Throwable e, boolean quiet);
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
index 7ac8b1e..df0f7d2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
@@ -4074,7 +4074,7 @@
     }
 
     /**
-     * Logs warning message in both verbose and quite modes.
+     * Logs warning message in both verbose and quiet modes.
      *
      * @param log Logger to use.
      * @param msg Message to log.
@@ -4084,7 +4084,7 @@
     }
 
     /**
-     * Logs warning message in both verbose and quite modes.
+     * Logs warning message in both verbose and quiet modes.
      *
      * @param log Logger to use.
      * @param shortMsg Short message.
@@ -4254,7 +4254,7 @@
     }
 
     /**
-     * Prints out the message in quite and info modes.
+     * Prints out the message in quiet and info modes.
      *
      * @param log Logger.
      * @param msg Message to print.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/future/GridCompoundFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/util/future/GridCompoundFuture.java
index 3409341..0f7e020 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/future/GridCompoundFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/future/GridCompoundFuture.java
@@ -33,6 +33,8 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 
 /**
@@ -53,8 +55,11 @@
     private static final AtomicIntegerFieldUpdater<GridCompoundFuture> LSNR_CALLS_UPD =
         AtomicIntegerFieldUpdater.newUpdater(GridCompoundFuture.class, "lsnrCalls");
 
-    /** Futures. */
-    protected final ArrayList<IgniteInternalFuture<T>> futs = new ArrayList<>();
+    /** Sync object. */
+    protected final Object sync = new Object();
+
+    /** Possible values: null (no future), IgniteInternalFuture instance (single future) or List of futures. */
+    private volatile Object futs;
 
     /** Reducer. */
     @GridToStringInclude
@@ -154,9 +159,16 @@
      *
      * @return Collection of futures.
      */
+    @SuppressWarnings("unchecked")
     public Collection<IgniteInternalFuture<T>> futures() {
-        synchronized (futs) {
-            return new ArrayList<>(futs);
+        synchronized (sync) {
+            if(futs == null)
+                return Collections.emptyList();
+
+            if (futs instanceof IgniteInternalFuture)
+                return Collections.singletonList((IgniteInternalFuture<T>)futs);
+
+            return new ArrayList<>((Collection<IgniteInternalFuture<T>>)futs);
         }
     }
 
@@ -179,10 +191,10 @@
      */
     @SuppressWarnings("ForLoopReplaceableByForEach")
     public boolean hasPending() {
-        synchronized (futs) {
+        synchronized (sync) {
             // Avoid iterator creation and collection copy.
-            for (int i = 0; i < futs.size(); i++) {
-                IgniteInternalFuture<T> fut = futs.get(i);
+            for (int i = 0; i < futuresCount(); i++) {
+                IgniteInternalFuture<T> fut = future(i);
 
                 if (!fut.isDone())
                     return true;
@@ -197,11 +209,23 @@
      *
      * @param fut Future to add.
      */
+    @SuppressWarnings("unchecked")
     public void add(IgniteInternalFuture<T> fut) {
         assert fut != null;
 
-        synchronized (futs) {
-            futs.add(fut);
+        synchronized (sync) {
+            if (futs == null)
+                futs = fut;
+            else if (futs instanceof IgniteInternalFuture) {
+                Collection<IgniteInternalFuture> futs0 = new ArrayList<>(4);
+
+                futs0.add((IgniteInternalFuture)futs);
+                futs0.add(fut);
+
+                futs = futs0;
+            }
+            else
+                ((Collection<IgniteInternalFuture>)futs).add(fut);
         }
 
         fut.listen(this);
@@ -217,8 +241,17 @@
     }
 
     /**
-     * @return {@code True} if this future was initialized. Initialization happens when
-     *      {@link #markInitialized()} method is called on future.
+     * Clear futures.
+     */
+    protected void clear() {
+        synchronized (sync) {
+            futs = null;
+        }
+    }
+
+    /**
+     * @return {@code True} if this future was initialized. Initialization happens when {@link #markInitialized()}
+     * method is called on future.
      */
     public boolean initialized() {
         return initFlag == INIT_FLAG;
@@ -236,7 +269,7 @@
      * Check completeness of the future.
      */
     private void checkComplete() {
-        if (initialized() && !isDone() && lsnrCalls == futuresSize()) {
+        if (initialized() && !isDone() && lsnrCalls == futuresCount()) {
             try {
                 onDone(rdc != null ? rdc.reduce() : null);
             }
@@ -256,11 +289,38 @@
     }
 
     /**
+     * Returns future at the specified position in this list.
+     *
+     * @param idx Index of the element to return.
+     * @return Future.
+     */
+    @SuppressWarnings("unchecked")
+    protected IgniteInternalFuture<T> future(int idx) {
+        assert Thread.holdsLock(sync);
+        assert futs != null && idx >= 0 && idx < futuresCount();
+
+        if (futs instanceof IgniteInternalFuture) {
+            assert idx == 0;
+
+            return (IgniteInternalFuture<T>)futs;
+        }
+        else
+            return ((List<IgniteInternalFuture>)futs).get(idx);
+    }
+
+    /**
      * @return Futures size.
      */
-    protected int futuresSize() {
-        synchronized (futs) {
-            return futs.size();
+    @SuppressWarnings("unchecked")
+    protected int futuresCount() {
+        synchronized (sync) {
+            if (futs == null)
+                return 0;
+
+            if (futs instanceof IgniteInternalFuture)
+                return 1;
+
+            return ((Collection<IgniteInternalFuture>)futs).size();
         }
     }
 
@@ -271,11 +331,11 @@
             "cancelled", isCancelled(),
             "err", error(),
             "futs",
-                F.viewReadOnly(futures(), new C1<IgniteInternalFuture<T>, String>() {
-                    @Override public String apply(IgniteInternalFuture<T> f) {
-                        return Boolean.toString(f.isDone());
-                    }
-                })
+            F.viewReadOnly(futures(), new C1<IgniteInternalFuture<T>, String>() {
+                @Override public String apply(IgniteInternalFuture<T> f) {
+                    return Boolean.toString(f.isDone());
+                }
+            })
         );
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryNativeLoader.java b/modules/core/src/main/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryNativeLoader.java
index 2771d28..02c4de5 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryNativeLoader.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryNativeLoader.java
@@ -150,7 +150,7 @@
 
             try {
                 if (log != null)
-                    LT.warn(log, null, "Failed to load 'igniteshmem' library from classpath. Will try to load it from IGNITE_HOME.");
+                    LT.warn(log, "Failed to load 'igniteshmem' library from classpath. Will try to load it from IGNITE_HOME.");
 
                 String igniteHome = X.resolveIgniteHome();
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryServerEndpoint.java b/modules/core/src/main/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryServerEndpoint.java
index 94c3820..178e608 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryServerEndpoint.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryServerEndpoint.java
@@ -304,13 +304,13 @@
                     String msg = "Failed to process incoming connection (most probably, shared memory " +
                         "rest endpoint has been configured by mistake).";
 
-                    LT.warn(log, null, msg);
+                    LT.warn(log, msg);
 
                     sendErrorResponse(out, e);
                 }
                 catch (IpcOutOfSystemResourcesException e) {
                     if (!omitOutOfResourcesWarn)
-                        LT.warn(log, null, OUT_OF_RESOURCES_MSG);
+                        LT.warn(log, OUT_OF_RESOURCES_MSG);
 
                     sendErrorResponse(out, e);
                 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridConnectionBytesVerifyFilter.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridConnectionBytesVerifyFilter.java
index 13d7ca7..213fd8d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridConnectionBytesVerifyFilter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridConnectionBytesVerifyFilter.java
@@ -115,7 +115,7 @@
             else {
                 ses.close();
 
-                LT.warn(log, null, "Unknown connection detected (is some other software connecting to this " +
+                LT.warn(log, "Unknown connection detected (is some other software connecting to this " +
                     "Ignite port?) [rmtAddr=" + ses.remoteAddress() + ", locAddr=" + ses.localAddress() + ']');
             }
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioCodecFilter.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioCodecFilter.java
index a2f543d..7083ccf 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioCodecFilter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioCodecFilter.java
@@ -110,7 +110,7 @@
                         if (directMode)
                             return;
 
-                        LT.warn(log, null, "Parser returned null but there are still unread data in input buffer (bug in " +
+                        LT.warn(log, "Parser returned null but there are still unread data in input buffer (bug in " +
                             "parser code?) [parser=" + parser + ", ses=" + ses + ']');
 
                         input.position(input.limit());
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterChain.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterChain.java
index 8a43e29..a3a74e3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterChain.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterChain.java
@@ -158,7 +158,7 @@
             head.onExceptionCaught(ses, e);
         }
         catch (Exception ex) {
-            LT.warn(log, ex, "Failed to forward GridNioException to filter chain [ses=" + ses + ", e=" + e + ']');
+            LT.error(log, ex, "Failed to forward GridNioException to filter chain [ses=" + ses + ", e=" + e + ']');
         }
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java
index 24b8fad..c8e2e0b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java
@@ -768,7 +768,7 @@
                 filterChain.onMessageReceived(ses, readBuf);
 
                 if (readBuf.remaining() > 0) {
-                    LT.warn(log, null, "Read buffer contains data after filter chain processing (will discard " +
+                    LT.warn(log, "Read buffer contains data after filter chain processing (will discard " +
                         "remaining bytes) [ses=" + ses + ", remainingCnt=" + readBuf.remaining() + ']');
 
                     readBuf.clear();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridSelectorNioSessionImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridSelectorNioSessionImpl.java
index 0ba6af2..63c9845 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridSelectorNioSessionImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridSelectorNioSessionImpl.java
@@ -227,7 +227,7 @@
 
             if (recovery != null) {
                 if (!recovery.add(last)) {
-                    LT.warn(log, null, "Unacknowledged messages queue size overflow, will attempt to reconnect " +
+                    LT.warn(log, "Unacknowledged messages queue size overflow, will attempt to reconnect " +
                         "[remoteAddr=" + remoteAddress() +
                         ", queueLimit=" + recovery.queueLimit() + ']');
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/offheap/unsafe/GridUnsafeLru.java b/modules/core/src/main/java/org/apache/ignite/internal/util/offheap/unsafe/GridUnsafeLru.java
index aaff4f9..ea65217 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/offheap/unsafe/GridUnsafeLru.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/offheap/unsafe/GridUnsafeLru.java
@@ -28,8 +28,7 @@
 /**
  * Striped LRU queue.
  */
-@SuppressWarnings("ForLoopReplaceableByForEach")
-class GridUnsafeLru {
+@SuppressWarnings("ForLoopReplaceableByForEach") class GridUnsafeLru {
     /** Number of stripes. */
     private final short cnt;
 
@@ -47,6 +46,9 @@
     /** Current round-robin remove stripe index. */
     private final AtomicInteger rmvIdx;
 
+    /** Max stripe index count. */
+    private final int maxIdxCnt;
+
     /** Released flag. */
     private AtomicBoolean released = new AtomicBoolean(false);
 
@@ -68,6 +70,8 @@
 
         addIdx = new AtomicInteger();
         rmvIdx = new AtomicInteger(cnt / 2);
+
+        maxIdxCnt = cnt - 1;
     }
 
     /**
@@ -156,7 +160,7 @@
      * @throws GridOffHeapOutOfMemoryException If failed.
      */
     long offer(int part, long addr, int hash) throws GridOffHeapOutOfMemoryException {
-        return lrus[addIdx.getAndIncrement() % cnt].offer(part, addr, hash);
+        return lrus[incrementAndGet(addIdx, maxIdxCnt)].offer(part, addr, hash);
     }
 
     /**
@@ -165,7 +169,7 @@
      * @return Queue node address.
      */
     long prePoll() {
-        int idx = rmvIdx.getAndIncrement();
+        int idx = incrementAndGet(rmvIdx, maxIdxCnt);
 
         // Must try to poll from each LRU.
         for (int i = 0; i < lrus.length; i++) {
@@ -180,6 +184,7 @@
 
     /**
      * Removes polling node from the queue.
+     *
      * @param qAddr Queue node address.
      */
     void poll(long qAddr) {
@@ -215,6 +220,23 @@
         }
     }
 
+    /**
+     * Atomically increments the given value by one, re-starting from 0 when the specified maximum is reached.
+     *
+     * @param value Value to increment.
+     * @param max Maximum after reaching which the value is reset to 0.
+     * @return Incremented value.
+     */
+    private int incrementAndGet(AtomicInteger value, int max) {
+        while (true) {
+            int cur = value.get();
+            int next = cur == max ? 0 : cur + 1;
+
+            if (value.compareAndSet(cur, next))
+                return next;
+        }
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return S.toString(GridUnsafeLru.class, this);
diff --git a/modules/core/src/main/java/org/apache/ignite/services/Service.java b/modules/core/src/main/java/org/apache/ignite/services/Service.java
index 60b977d..e82f6c3 100644
--- a/modules/core/src/main/java/org/apache/ignite/services/Service.java
+++ b/modules/core/src/main/java/org/apache/ignite/services/Service.java
@@ -53,21 +53,21 @@
  * Consecutively, this service can be deployed as follows:
  * <pre name="code" class="java">
  * ...
- * GridServices svcs = grid.services();
+ * IgniteServices svcs = ignite.services();
  *
- * svcs.deployClusterSingleton("mySingleton", new MyGridService());
+ * svcs.deployClusterSingleton("mySingleton", new MyIgniteService());
  * </pre>
  * Or from grid configuration on startup:
  * <pre name="code" class="java">
  * IgniteConfiguration gridCfg = new IgniteConfiguration();
  *
- * GridServiceConfiguration svcCfg = new GridServiceConfiguration();
+ * IgniteServiceConfiguration svcCfg = new IgniteServiceConfiguration();
  *
  * // Configuration for cluster-singleton service.
  * svcCfg.setName("mySingleton");
  * svcCfg.setMaxPerNodeCount(1);
  * svcCfg.setTotalCount(1);
- * svcCfg.setService(new MyGridService());
+ * svcCfg.setService(new MyIgniteService());
  *
  * gridCfg.setServiceConfiguration(svcCfg);
  * ...
@@ -88,7 +88,7 @@
      * {@code cancel} methods on {@link org.apache.ignite.IgniteServices} API are called.
      * <p>
      * Note that Ignite cannot guarantee that the service exits from {@link #execute(ServiceContext)}
-     * method whenever {@code cancel(GridServiceContext)} method is called. It is up to the user to
+     * method whenever {@code cancel(ServiceContext)} method is called. It is up to the user to
      * make sure that the service code properly reacts to cancellations.
      *
      * @param ctx Service execution context.
@@ -117,4 +117,4 @@
      *      {@link org.apache.ignite.IgniteServices#cancel(String)} method will be called.
      */
     public void execute(ServiceContext ctx) throws Exception;
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java
index 767490f..1fe437c 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java
@@ -331,7 +331,7 @@
     private final GridNioServerListener<Message> srvLsnr =
         new GridNioServerListenerAdapter<Message>() {
             @Override public void onSessionWriteTimeout(GridNioSession ses) {
-                LT.warn(log, null, "Communication SPI Session write timed out (consider increasing " +
+                LT.warn(log, "Communication SPI Session write timed out (consider increasing " +
                     "'socketWriteTimeout' " + "configuration property) [remoteAddr=" + ses.remoteAddress() +
                     ", writeTimeout=" + sockWriteTimeout + ']');
 
@@ -2146,9 +2146,9 @@
             catch (IgniteCheckedException e) {
                 if (e.hasCause(IpcOutOfSystemResourcesException.class))
                     // Has cause or is itself the IpcOutOfSystemResourcesException.
-                    LT.warn(log, null, OUT_OF_RESOURCES_TCP_MSG);
+                    LT.warn(log, OUT_OF_RESOURCES_TCP_MSG);
                 else if (getSpiContext().node(node.id()) != null)
-                    LT.warn(log, null, e.getMessage());
+                    LT.warn(log, e.getMessage());
                 else if (log.isDebugEnabled())
                     log.debug("Failed to establish shared memory connection with local node (node has left): " +
                         node.id());
@@ -2510,11 +2510,11 @@
                     boolean failureDetThrReached = timeoutHelper.checkFailureTimeoutReached(e);
 
                     if (failureDetThrReached)
-                        LT.warn(log, null, "Connect timed out (consider increasing 'failureDetectionTimeout' " +
+                        LT.warn(log, "Connect timed out (consider increasing 'failureDetectionTimeout' " +
                             "configuration property) [addr=" + addr + ", failureDetectionTimeout=" +
                             failureDetectionTimeout() + ']');
                     else if (X.hasCause(e, SocketTimeoutException.class))
-                        LT.warn(log, null, "Connect timed out (consider increasing 'connTimeout' " +
+                        LT.warn(log, "Connect timed out (consider increasing 'connTimeout' " +
                             "configuration property) [addr=" + addr + ", connTimeout=" + connTimeout + ']');
 
                     if (errs == null)
@@ -2545,7 +2545,7 @@
             assert errs != null;
 
             if (X.hasCause(errs, ConnectException.class))
-                LT.warn(log, null, "Failed to connect to a remote node " +
+                LT.warn(log, "Failed to connect to a remote node " +
                     "(make sure that destination node is alive and " +
                     "operating system firewall is disabled on local and remote hosts) " +
                     "[addrs=" + addrs + ']');
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java
index 81f1806..b0279c2 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java
@@ -64,7 +64,6 @@
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgniteUuid;
-import org.apache.ignite.marshaller.MarshallerUtils;
 import org.apache.ignite.spi.IgniteSpiContext;
 import org.apache.ignite.spi.IgniteSpiException;
 import org.apache.ignite.spi.IgniteSpiOperationTimeoutHelper;
@@ -483,7 +482,7 @@
                     if (timeout > 0 && (U.currentTimeMillis() - startTime) > timeout)
                         return null;
 
-                    LT.warn(log, null, "IP finder returned empty addresses list. " +
+                    LT.warn(log, "IP finder returned empty addresses list. " +
                             "Please check IP finder configuration" +
                             (spi.ipFinder instanceof TcpDiscoveryMulticastIpFinder ?
                                 " and make sure multicast works on your network. " : ". ") +
@@ -553,7 +552,7 @@
                 if (timeout > 0 && (U.currentTimeMillis() - startTime) > timeout)
                     return null;
 
-                LT.warn(log, null, "Failed to connect to any address from IP finder (will retry to join topology " +
+                LT.warn(log, "Failed to connect to any address from IP finder (will retry to join topology " +
                     "every 2 secs): " + toOrderedList(addrs0), true);
 
                 Thread.sleep(2000);
@@ -917,7 +916,7 @@
                             ClassNotFoundException clsNotFoundEx = X.cause(e, ClassNotFoundException.class);
 
                             if (clsNotFoundEx != null)
-                                LT.warn(log, null, "Failed to read message due to ClassNotFoundException " +
+                                LT.warn(log, "Failed to read message due to ClassNotFoundException " +
                                     "(make sure same versions of all classes are available on all nodes) " +
                                     "[rmtNodeId=" + rmtNodeId + ", err=" + clsNotFoundEx.getMessage() + ']');
                             else
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java
index 78a5f39..ab20339 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java
@@ -516,7 +516,7 @@
         boolean res = pingNode(node);
 
         if (!res && !node.isClient() && nodeAlive(nodeId)) {
-            LT.warn(log, null, "Failed to ping node (status check will be initiated): " + nodeId);
+            LT.warn(log, "Failed to ping node (status check will be initiated): " + nodeId);
 
             msgWorker.addMessage(new TcpDiscoveryStatusCheckMessage(locNode, node.id()));
         }
@@ -906,7 +906,7 @@
                         U.addressesAsString(msg.addresses(), msg.hostNames()) + ']');
                 }
                 else
-                    LT.warn(log, null, "Node has not been connected to topology and will repeat join process. " +
+                    LT.warn(log, "Node has not been connected to topology and will repeat join process. " +
                         "Check remote nodes logs for possible error messages. " +
                         "Note that large topology may require significant time to start. " +
                         "Increase 'TcpDiscoverySpi.networkTimeout' configuration property " +
@@ -1026,7 +1026,7 @@
                 }
 
                 if (e != null && X.hasCause(e, ConnectException.class)) {
-                    LT.warn(log, null, "Failed to connect to any address from IP finder " +
+                    LT.warn(log, "Failed to connect to any address from IP finder " +
                         "(make sure IP finder addresses are correct and firewalls are disabled on all host machines): " +
                         toOrderedList(addrs), true);
                 }
@@ -2905,7 +2905,7 @@
                     }
                 }
 
-                LT.warn(log, null, "Local node has detected failed nodes and started cluster-wide procedure. " +
+                LT.warn(log, "Local node has detected failed nodes and started cluster-wide procedure. " +
                         "To speed up failure detection please see 'Failure Detection' section under javadoc" +
                         " for 'TcpDiscoverySpi'");
             }
@@ -2990,7 +2990,7 @@
                         "[locNodeAddrs=" + U.addressesAsString(locNode) +
                         ", rmtNodeAddrs=" + U.addressesAsString(node) + ']';
 
-                    LT.warn(log, null, errMsg);
+                    LT.warn(log, errMsg);
 
                     // Always output in debug.
                     if (log.isDebugEnabled())
@@ -3043,7 +3043,7 @@
                         }
 
                         // Output warning.
-                        LT.warn(log, null, "Ignoring join request from node (duplicate ID) [node=" + node +
+                        LT.warn(log, "Ignoring join request from node (duplicate ID) [node=" + node +
                             ", existingNode=" + existingNode + ']');
 
                         // Ignore join request.
@@ -3098,8 +3098,7 @@
 
                         if (subj == null) {
                             // Node has not pass authentication.
-                            LT.warn(log, null,
-                                "Authentication failed [nodeId=" + node.id() +
+                            LT.warn(log, "Authentication failed [nodeId=" + node.id() +
                                     ", addrs=" + U.addressesAsString(node) + ']',
                                 "Authentication failed [nodeId=" + U.id8(node.id()) + ", addrs=" +
                                     U.addressesAsString(node) + ']');
@@ -3128,8 +3127,7 @@
                         else {
                             if (!(subj instanceof Serializable)) {
                                 // Node has not pass authentication.
-                                LT.warn(log, null,
-                                    "Authentication subject is not Serializable [nodeId=" + node.id() +
+                                LT.warn(log, "Authentication subject is not Serializable [nodeId=" + node.id() +
                                         ", addrs=" + U.addressesAsString(node) + ']',
                                     "Authentication subject is not Serializable [nodeId=" + U.id8(node.id()) +
                                         ", addrs=" +
@@ -3199,7 +3197,7 @@
                                     return;
                                 }
 
-                                LT.warn(log, null, err.message());
+                                LT.warn(log, err.message());
 
                                 // Always output in debug.
                                 if (log.isDebugEnabled())
@@ -3240,7 +3238,7 @@
                                     ", rmtNodeAddrs=" + U.addressesAsString(node) +
                                     ", locNodeId=" + locNode.id() + ", rmtNodeId=" + msg.creatorNodeId() + ']';
 
-                                LT.warn(log, null, errMsg);
+                                LT.warn(log, errMsg);
 
                                 // Always output in debug.
                                 if (log.isDebugEnabled())
@@ -3528,7 +3526,7 @@
          * @param sndMsg Message to send.
          */
         private void nodeCheckError(TcpDiscoveryNode node, String errMsg, String sndMsg) {
-            LT.warn(log, null, errMsg);
+            LT.warn(log, errMsg);
 
             // Always output in debug.
             if (log.isDebugEnabled())
@@ -3811,8 +3809,7 @@
 
                             if (!permissionsEqual(coordSubj.subject().permissions(), subj.subject().permissions())) {
                                 // Node has not pass authentication.
-                                LT.warn(log, null,
-                                    "Authentication failed [nodeId=" + node.id() +
+                                LT.warn(log, "Authentication failed [nodeId=" + node.id() +
                                         ", addrs=" + U.addressesAsString(node) + ']',
                                     "Authentication failed [nodeId=" + U.id8(node.id()) + ", addrs=" +
                                         U.addressesAsString(node) + ']');
@@ -5267,7 +5264,7 @@
                                     "[rmtAddr=" + sock.getRemoteSocketAddress() +
                                     ", locAddr=" + sock.getLocalSocketAddress() + ']');
 
-                            LT.warn(log, null, "Failed to read magic header (too few bytes received) [rmtAddr=" +
+                            LT.warn(log, "Failed to read magic header (too few bytes received) [rmtAddr=" +
                                 sock.getRemoteSocketAddress() + ", locAddr=" + sock.getLocalSocketAddress() + ']');
 
                             return;
@@ -5283,7 +5280,7 @@
                                 "[rmtAddr=" + sock.getRemoteSocketAddress() +
                                 ", locAddr=" + sock.getLocalSocketAddress() + ']');
 
-                        LT.warn(log, null, "Unknown connection detected (is some other software connecting to " +
+                        LT.warn(log, "Unknown connection detected (is some other software connecting to " +
                             "this Ignite port?" +
                             (!spi.isSslEnabled() ? " missing SSL configuration on remote node?" : "" ) +
                             ") [rmtAddr=" + sock.getInetAddress() + ']', true);
@@ -5403,7 +5400,7 @@
                         U.error(log, "Caught exception on handshake [err=" + e +", sock=" + sock + ']', e);
 
                     if (X.hasCause(e, SSLException.class) && spi.isSslEnabled() && !spi.isNodeStopping0())
-                        LT.warn(log, null, "Failed to initialize connection " +
+                        LT.warn(log, "Failed to initialize connection " +
                             "(missing SSL configuration on remote node?) " +
                             "[rmtAddr=" + sock.getInetAddress() + ']', true);
                     else if ((X.hasCause(e, ObjectStreamException.class) || !sock.isClosed())
@@ -5432,12 +5429,12 @@
                     onException("Caught exception on handshake [err=" + e +", sock=" + sock + ']', e);
 
                     if (e.hasCause(SocketTimeoutException.class))
-                        LT.warn(log, null, "Socket operation timed out on handshake " +
+                        LT.warn(log, "Socket operation timed out on handshake " +
                             "(consider increasing 'networkTimeout' configuration property) " +
                             "[netTimeout=" + spi.netTimeout + ']');
 
                     else if (e.hasCause(ClassNotFoundException.class))
-                        LT.warn(log, null, "Failed to read message due to ClassNotFoundException " +
+                        LT.warn(log, "Failed to read message due to ClassNotFoundException " +
                             "(make sure same versions of all classes are available on all nodes) " +
                             "[rmtAddr=" + sock.getRemoteSocketAddress() +
                             ", err=" + X.cause(e, ClassNotFoundException.class).getMessage() + ']');
@@ -5667,7 +5664,7 @@
                             return;
 
                         if (e.hasCause(ClassNotFoundException.class))
-                            LT.warn(log, null, "Failed to read message due to ClassNotFoundException " +
+                            LT.warn(log, "Failed to read message due to ClassNotFoundException " +
                                 "(make sure same versions of all classes are available on all nodes) " +
                                 "[rmtNodeId=" + nodeId +
                                 ", err=" + X.cause(e, ClassNotFoundException.class).getMessage() + ']');
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryImpl.java
index 30b83e5..1d63852 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryImpl.java
@@ -303,7 +303,7 @@
      */
     protected boolean checkAckTimeout(long ackTimeout) {
         if (ackTimeout > spi.getMaxAckTimeout()) {
-            LT.warn(log, null, "Acknowledgement timeout is greater than maximum acknowledgement timeout " +
+            LT.warn(log, "Acknowledgement timeout is greater than maximum acknowledgement timeout " +
                 "(consider increasing 'maxAckTimeout' configuration property) " +
                 "[ackTimeout=" + ackTimeout + ", maxAckTimeout=" + spi.getMaxAckTimeout() + ']');
 
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
index a8704e7..45933e1 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
@@ -1471,7 +1471,7 @@
         }
         catch (IOException | IgniteCheckedException e) {
             if (X.hasCause(e, SocketTimeoutException.class))
-                LT.warn(log, null, "Timed out waiting for message to be read (most probably, the reason is " +
+                LT.warn(log, "Timed out waiting for message to be read (most probably, the reason is " +
                     "in long GC pauses on remote node) [curTimeout=" + timeout + ']');
 
             throw e;
@@ -1511,7 +1511,7 @@
             return res;
         }
         catch (SocketTimeoutException e) {
-            LT.warn(log, null, "Timed out waiting for message delivery receipt (most probably, the reason is " +
+            LT.warn(log, "Timed out waiting for message delivery receipt (most probably, the reason is " +
                 "in long GC pauses on remote node; consider tuning GC and increasing 'ackTimeout' " +
                 "configuration property). Will retry to send message with increased timeout. " +
                 "Current timeout: " + timeout + '.');
@@ -1575,7 +1575,7 @@
                     res.add(resolved);
             }
             catch (UnknownHostException ignored) {
-                LT.warn(log, null, "Failed to resolve address from IP finder (host is unknown): " + addr);
+                LT.warn(log, "Failed to resolve address from IP finder (host is unknown): " + addr);
 
                 // Add address in any case.
                 res.add(addr);
@@ -2045,7 +2045,7 @@
                 // Close socket - timeout occurred.
                 U.closeQuiet(sock);
 
-                LT.warn(log, null, "Socket write has timed out (consider increasing " +
+                LT.warn(log, "Socket write has timed out (consider increasing " +
                     (failureDetectionTimeoutEnabled() ?
                     "'IgniteConfiguration.failureDetectionTimeout' configuration property) [" +
                     "failureDetectionTimeout=" + failureDetectionTimeout() + ']' :
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/multicast/TcpDiscoveryMulticastIpFinder.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/multicast/TcpDiscoveryMulticastIpFinder.java
index e96abe9..8fe8a65 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/multicast/TcpDiscoveryMulticastIpFinder.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/multicast/TcpDiscoveryMulticastIpFinder.java
@@ -598,7 +598,7 @@
                                 addrRes = new AddressResponse(data);
                             }
                             catch (IgniteCheckedException e) {
-                                LT.warn(log, e, "Failed to deserialize multicast response.");
+                                LT.error(log, e, "Failed to deserialize multicast response.");
 
                                 continue;
                             }
@@ -876,7 +876,7 @@
                 }
                 catch (IOException e) {
                     if (!isInterrupted()) {
-                        LT.warn(log, e, "Failed to send/receive address message (will try to reconnect).");
+                        LT.error(log, e, "Failed to send/receive address message (will try to reconnect).");
 
                         synchronized (this) {
                             U.close(sock);
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/swapspace/file/FileSwapSpaceSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/swapspace/file/FileSwapSpaceSpi.java
index f0ac7bc..e277868 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/swapspace/file/FileSwapSpaceSpi.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/swapspace/file/FileSwapSpaceSpi.java
@@ -333,12 +333,7 @@
 
     /** {@inheritDoc} */
     @Override public void clear(@Nullable String spaceName) throws IgniteSpiException {
-        Space space = space(spaceName, false);
-
-        if (space == null)
-            return;
-
-        space.clear();
+        destruct(spaceName);
 
         notifyListener(EVT_SWAP_SPACE_CLEARED, spaceName);
     }
@@ -630,7 +625,7 @@
      * @throws org.apache.ignite.spi.IgniteSpiException In case of error.
      */
     @Nullable private Space space(@Nullable String name, boolean create) throws IgniteSpiException {
-        String masked = name != null ? name : DFLT_SPACE_NAME;
+        String masked = maskNull(name);
 
         assert masked != null;
 
@@ -652,6 +647,36 @@
     }
 
     /**
+     * Destructs space.
+     *
+     * @param spaceName Space name.
+     * */
+    private void destruct(@Nullable String spaceName) {
+        String masked = maskNull(spaceName);
+
+        Space space = spaces.remove(masked);
+
+        if (space != null) {
+            try {
+                space.stop();
+            }
+            catch (IgniteInterruptedCheckedException e) {
+                U.error(log, "Interrupted.", e);
+            }
+        }
+    }
+
+    /**
+     * Masks null space name with default space name.
+     *
+     * @param spaceName Space name.
+     * @return Space name or default space name if space name is null.
+     * */
+    private static String maskNull(String spaceName) {
+        return spaceName != null ? spaceName : DFLT_SPACE_NAME;
+    }
+
+    /**
      * Validates space name.
      *
      * @param name Space name.
@@ -1623,18 +1648,6 @@
         }
 
         /**
-         * Clears space.
-         *
-         * @throws org.apache.ignite.spi.IgniteSpiException If failed.
-         */
-        public void clear() throws IgniteSpiException {
-            Iterator<Map.Entry<SwapKey, byte[]>> iter = entriesIterator();
-
-            while (iter.hasNext())
-                remove(iter.next().getKey(), false);
-        }
-
-        /**
          * Stops space.
          *
          * @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted.
@@ -1931,4 +1944,4 @@
             };
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/cache/affinity/AbstractAffinityFunctionSelfTest.java b/modules/core/src/test/java/org/apache/ignite/cache/affinity/AbstractAffinityFunctionSelfTest.java
index 878d7d1..ee5b65c 100644
--- a/modules/core/src/test/java/org/apache/ignite/cache/affinity/AbstractAffinityFunctionSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/cache/affinity/AbstractAffinityFunctionSelfTest.java
@@ -106,6 +106,22 @@
     /**
      * @throws Exception If failed.
      */
+    public void testNullKeyForPartitionCalculation() throws Exception {
+        AffinityFunction aff = affinityFunction();
+
+        try {
+            aff.partition(null);
+
+            fail("Should throw IllegalArgumentException due to NULL affinity key.");
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().contains("Null key is passed for a partition calculation. " +
+                "Make sure that an affinity key that is used is initialized properly."));
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
     protected void checkNodeRemoved(int backups) throws Exception {
         checkNodeRemoved(backups, 1, 1);
     }
@@ -290,4 +306,4 @@
     private static int deviation(int val, int ideal) {
         return Math.round(Math.abs(((float)val - ideal) / ideal * 100));
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteLocalNodeMapBeforeStartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteLocalNodeMapBeforeStartTest.java
new file mode 100644
index 0000000..5f22399
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteLocalNodeMapBeforeStartTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal;
+
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.lifecycle.LifecycleBean;
+import org.apache.ignite.lifecycle.LifecycleEventType;
+import org.apache.ignite.resources.IgniteInstanceResource;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.lifecycle.LifecycleEventType.AFTER_NODE_START;
+import static org.apache.ignite.lifecycle.LifecycleEventType.AFTER_NODE_STOP;
+import static org.apache.ignite.lifecycle.LifecycleEventType.BEFORE_NODE_START;
+import static org.apache.ignite.lifecycle.LifecycleEventType.BEFORE_NODE_STOP;
+
+/**
+ *
+ */
+public class IgniteLocalNodeMapBeforeStartTest extends GridCommonAbstractTest {
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNodeLocalMapFromLifecycleBean() throws Exception {
+        IgniteConfiguration cfg = getConfiguration(getTestGridName(0));
+
+        LifecycleBeanTest lifecycleBean = new LifecycleBeanTest();
+
+        // Provide lifecycle bean to configuration.
+        cfg.setLifecycleBeans(lifecycleBean);
+
+        try (Ignite ignite = Ignition.start(cfg)) {
+            // No-op.
+        }
+
+        assertTrue(lifecycleBean.evtQueue.size() == 4);
+        assertTrue(lifecycleBean.evtQueue.poll() == BEFORE_NODE_START);
+        assertTrue(lifecycleBean.evtQueue.poll() == AFTER_NODE_START);
+        assertTrue(lifecycleBean.evtQueue.poll() == BEFORE_NODE_STOP);
+        assertTrue(lifecycleBean.evtQueue.poll() == AFTER_NODE_STOP);
+    }
+
+    /**
+     * Simple {@link LifecycleBean} implementation.
+     */
+    private static class LifecycleBeanTest implements LifecycleBean {
+        /** Auto-inject ignite instance. */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** Event queue. */
+        ConcurrentLinkedQueue<LifecycleEventType> evtQueue = new ConcurrentLinkedQueue<>();
+
+        /** {@inheritDoc} */
+        @Override public void onLifecycleEvent(LifecycleEventType evt) {
+            evtQueue.add(evt);
+
+            // check nodeLocalMap is not locked
+            ConcurrentMap map = ignite.cluster().nodeLocalMap();
+
+            assertNotNull(map);
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheOffHeapCleanupTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheOffHeapCleanupTest.java
new file mode 100644
index 0000000..ae94073
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheOffHeapCleanupTest.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheMemoryMode;
+import org.apache.ignite.cache.eviction.EvictionPolicy;
+import org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicy;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.cache.CacheMemoryMode.OFFHEAP_TIERED;
+import static org.apache.ignite.cache.CacheMemoryMode.OFFHEAP_VALUES;
+import static org.apache.ignite.cache.CacheMemoryMode.ONHEAP_TIERED;
+
+/**
+ * Check offheap allocations are freed after cache destroy.
+ */
+public class GridCacheOffHeapCleanupTest extends GridCommonAbstractTest {
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    private static final String CACHE_NAME = "testCache";
+
+    /** Memory mode. */
+    private CacheMemoryMode memoryMode;
+
+    /** Eviction policy. */
+    private EvictionPolicy evictionPlc;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(ipFinder);
+
+        return cfg;
+    }
+
+    /**
+     * Checks offheap resources are freed after cache destroy - ONHEAP_TIERED memory mode
+     *
+     * @throws Exception If failed.
+     */
+    public void testCleanupOffheapAfterCacheDestroyOnheapTiered() throws Exception {
+        memoryMode = ONHEAP_TIERED;
+
+        FifoEvictionPolicy evictionPlc0 = new FifoEvictionPolicy();
+        evictionPlc0.setMaxSize(1);
+
+        evictionPlc = evictionPlc0;
+
+        checkCleanupOffheapAfterCacheDestroy();
+    }
+
+    /**
+     * Checks offheap resources are freed after cache destroy - OFFHEAP_TIERED memory mode
+     *
+     * @throws Exception If failed.
+     */
+    public void testCleanupOffheapAfterCacheDestroyOffheapTiered() throws Exception {
+        memoryMode = OFFHEAP_TIERED;
+        evictionPlc = null;
+
+        checkCleanupOffheapAfterCacheDestroy();
+    }
+
+    /**
+     * TODO: IGNITE-2714.
+     *
+     * Checks offheap resources are freed after cache destroy - OFFHEAP_VALUES memory mode
+     *
+     * @throws Exception If failed.
+     */
+    public void _testCleanupOffheapAfterCacheDestroyOffheapValues() throws Exception {
+        memoryMode = OFFHEAP_VALUES;
+        evictionPlc = null;
+
+        try (Ignite g = startGrid(0)) {
+            IgniteCache<Integer, String> cache = g.getOrCreateCache(createCacheConfiguration());
+
+            cache.put(1, "value_1");
+            cache.put(2, "value_2");
+
+            GridCacheContext ctx = GridTestUtils.cacheContext(cache);
+            GridUnsafeMemory unsafeMemory = ctx.unsafeMemory();
+
+            g.destroyCache(null);
+
+            if (unsafeMemory != null)
+                assertEquals("Unsafe memory not freed", 0, unsafeMemory.allocatedSize());
+        }
+    }
+
+    /**
+     * Creates cache configuration.
+     *
+     * @return cache configuration.
+     * */
+    private CacheConfiguration<Integer, String> createCacheConfiguration() {
+        CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>();
+
+        ccfg.setName(CACHE_NAME);
+        ccfg.setOffHeapMaxMemory(0);
+        ccfg.setMemoryMode(memoryMode);
+        ccfg.setEvictionPolicy(evictionPlc);
+
+        return ccfg;
+    }
+
+    /**
+     * Check offheap resources are freed after cache destroy.
+     *
+     * @throws Exception If failed.
+     */
+    private void checkCleanupOffheapAfterCacheDestroy() throws Exception {
+        final String spaceName = "gg-swap-cache-" + CACHE_NAME;
+
+        try (Ignite g = startGrid(0)) {
+            checkOffheapAllocated(spaceName, false);
+
+            IgniteCache<Integer, String> cache = g.getOrCreateCache(createCacheConfiguration());
+
+            cache.put(1, "value_1");
+            cache.put(2, "value_2");
+
+            checkOffheapAllocated(spaceName, true);
+
+            g.destroyCache(cache.getName());
+
+            checkOffheapAllocated(spaceName, false);
+        }
+    }
+
+    /**
+     * Check is offheap allocated for given space name using internal API.
+     *
+     * @param spaceName Space name.
+     * @param allocated true, if we expected that offheap is allocated; false, otherwise.
+     * @throws Exception If failed.
+     * */
+    private void checkOffheapAllocated(String spaceName, boolean allocated) throws Exception {
+        long offheapSize = grid(0).context().offheap().allocatedSize(spaceName);
+
+        assertEquals("Unexpected offheap allocated size", allocated, (offheapSize >= 0));
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheSwapCleanupTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheSwapCleanupTest.java
new file mode 100644
index 0000000..5835ef0
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheSwapCleanupTest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.eviction.lru.LruEvictionPolicy;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.managers.swapspace.GridSwapSpaceManager;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.spi.swapspace.file.FileSwapSpaceSpi;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Check swap is cleaned after cache destroy.
+ */
+public class GridCacheSwapCleanupTest extends GridCommonAbstractTest {
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Cache name. */
+    private static final String CACHE_NAME = "swapCleanupCache";
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
+
+        cfg.setSwapSpaceSpi(new FileSwapSpaceSpi());
+
+        return cfg;
+    }
+
+    /**
+     * Creates cache configuration.
+     *
+     * @return Cache configuration.
+     * */
+    private CacheConfiguration createCacheConfiguration() {
+        CacheConfiguration ccfg = new CacheConfiguration();
+
+        ccfg.setName(CACHE_NAME);
+        ccfg.setEvictionPolicy(new LruEvictionPolicy(10));
+        ccfg.setSwapEnabled(true);
+
+        return ccfg;
+    }
+
+    /**
+     * Checks swap is cleaned after cache destroy.
+     *
+     * @throws Exception If failed.
+     * */
+    public void testSwapCleanupAfterCacheDestroy() throws Exception {
+        try (Ignite g = startGrid()) {
+            for (int iter = 0; iter < 3; iter++) {
+                IgniteCache cache = g.getOrCreateCache(createCacheConfiguration());
+
+                for (int i = 0; i < 20; i++) {
+                    assertNull(cache.get(i));
+
+                    cache.put(i, i);
+                }
+
+                String spaceName = CU.swapSpaceName(internalCache(cache).context());
+
+                GridSwapSpaceManager swapSpaceMgr = ((IgniteEx)g).context().swap();
+
+                assertEquals(10, swapSpaceMgr.swapKeys(spaceName));
+
+                g.destroyCache(cache.getName());
+
+                assertEquals(0, swapSpaceMgr.swapKeys(spaceName));
+                assertEquals(0, swapSpaceMgr.swapSize(spaceName));
+            }
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheCreateRestartSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheCreateRestartSelfTest.java
index e8e66c4..681636a 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheCreateRestartSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheCreateRestartSelfTest.java
@@ -71,8 +71,6 @@
      * @throws Exception If failed.
      */
     public void testStopOriginatingNode() throws Exception {
-        fail("https://issues.apache.org/jira/browse/IGNITE-1690");
-
         startGrids(NODES);
 
         ThreadLocalRandom rnd = ThreadLocalRandom.current();
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteDynamicCacheStartSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteDynamicCacheStartSelfTest.java
index c9cd750..48e06ee 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteDynamicCacheStartSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteDynamicCacheStartSelfTest.java
@@ -17,7 +17,9 @@
 
 package org.apache.ignite.internal.processors.cache;
 
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.CountDownLatch;
@@ -181,7 +183,8 @@
                 info("Succeeded: " + System.identityHashCode(fut));
 
                 succeeded++;
-            } catch (IgniteCheckedException e) {
+            }
+            catch (IgniteCheckedException e) {
                 info(e.getMessage());
 
                 failed++;
@@ -246,7 +249,8 @@
                 info("Succeeded: " + System.identityHashCode(fut));
 
                 succeeded++;
-            } catch (IgniteCheckedException e) {
+            }
+            catch (IgniteCheckedException e) {
                 info(e.getMessage());
 
                 failed++;
@@ -289,6 +293,20 @@
     }
 
     /**
+     * @throws Exception If failed.
+     */
+    public void testStartStopCachesSimpleTransactional() throws Exception {
+        checkStartStopCachesSimple(CacheAtomicityMode.TRANSACTIONAL);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testStartStopCachesSimpleAtomic() throws Exception {
+        checkStartStopCachesSimple(CacheAtomicityMode.ATOMIC);
+    }
+
+    /**
      * @param mode Cache atomicity mode.
      * @throws Exception If failed.
      */
@@ -325,10 +343,10 @@
         for (int g = 0; g < nodeCount(); g++)
             caches[g] = grid(g).cache(DYNAMIC_CACHE_NAME);
 
-        kernal.context().cache().dynamicDestroyCache(DYNAMIC_CACHE_NAME, true).get();
+        kernal.destroyCache(DYNAMIC_CACHE_NAME);
 
         for (int g = 0; g < nodeCount(); g++) {
-            final IgniteKernal kernal0 = (IgniteKernal) grid(g);
+            final IgniteKernal kernal0 = (IgniteKernal)grid(g);
 
             final int idx = g;
 
@@ -346,6 +364,87 @@
     }
 
     /**
+     * @param mode Cache atomicity mode.
+     * @throws Exception If failed.
+     */
+    private void checkStartStopCachesSimple(CacheAtomicityMode mode) throws Exception {
+        final IgniteEx kernal = grid(0);
+        final int cacheCnt = 3;
+
+        List<CacheConfiguration> ccfgList = new ArrayList<>();
+
+        for (int i = 0; i < cacheCnt; i++) {
+            CacheConfiguration ccfg = new CacheConfiguration();
+            ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+            ccfg.setAtomicityMode(mode);
+            ccfg.setName(DYNAMIC_CACHE_NAME + Integer.toString(i));
+
+            ccfgList.add(ccfg);
+        }
+
+        kernal.createCaches(ccfgList);
+
+        for (int g = 0; g < nodeCount(); g++) {
+            IgniteEx kernal0 = grid(g);
+
+            for (IgniteInternalFuture f : kernal0.context().cache().context().exchange().exchangeFutures())
+                f.get();
+
+            info("Getting cache for node: " + g);
+
+            for (int i = 0; i < cacheCnt; i++)
+                assertNotNull(grid(g).cache(DYNAMIC_CACHE_NAME + Integer.toString(i)));
+        }
+
+        for (int i = 0; i < cacheCnt; i++)
+            grid(0).cache(DYNAMIC_CACHE_NAME + Integer.toString(i)).put(Integer.toString(i), Integer.toString(i));
+
+        for (int g = 0; g < nodeCount(); g++) {
+            for (int i = 0; i < cacheCnt; i++) {
+                assertEquals(
+                    Integer.toString(i),
+                    grid(g).cache(DYNAMIC_CACHE_NAME + Integer.toString(i)).get(Integer.toString(i))
+                );
+            }
+        }
+
+        // Grab caches before stop.
+        final IgniteCache[] caches = new IgniteCache[nodeCount() * cacheCnt];
+
+        for (int g = 0; g < nodeCount(); g++) {
+            for (int i = 0; i < cacheCnt; i++)
+                caches[g * nodeCount() + i] = grid(g).cache(DYNAMIC_CACHE_NAME + Integer.toString(i));
+        }
+
+        List<String> namesToDestroy = new ArrayList<>();
+
+        for (int i = 0; i < cacheCnt; i++)
+            namesToDestroy.add(DYNAMIC_CACHE_NAME + Integer.toString(i));
+
+        kernal.destroyCaches(namesToDestroy);
+
+        for (int g = 0; g < nodeCount(); g++) {
+            final IgniteKernal kernal0 = (IgniteKernal)grid(g);
+
+            for (int i = 0; i < cacheCnt; i++) {
+                final int idx = g * nodeCount() + i;
+                final int expVal = i;
+
+                for (IgniteInternalFuture f : kernal0.context().cache().context().exchange().exchangeFutures())
+                    f.get();
+
+                assertNull(kernal0.cache(DYNAMIC_CACHE_NAME));
+
+                GridTestUtils.assertThrows(log, new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        return caches[idx].get(Integer.toString(expVal));
+                    }
+                }, IllegalStateException.class, null);
+            }
+        }
+    }
+
+    /**
      * @throws Exception If failed.
      */
     public void testStartStopCacheAddNode() throws Exception {
@@ -378,13 +477,13 @@
             }
 
             // Undeploy cache.
-            kernal.context().cache().dynamicDestroyCache(DYNAMIC_CACHE_NAME, true).get();
+            kernal.destroyCache(DYNAMIC_CACHE_NAME);
 
             startGrid(nodeCount() + 1);
 
             // Check that cache is not deployed on new node after undeploy.
             for (int g = 0; g < nodeCount() + 2; g++) {
-                final IgniteKernal kernal0 = (IgniteKernal) grid(g);
+                final IgniteKernal kernal0 = (IgniteKernal)grid(g);
 
                 for (IgniteInternalFuture f : kernal0.context().cache().context().exchange().exchangeFutures())
                     f.get();
@@ -431,16 +530,16 @@
             for (int g = 0; g < nodeCount(); g++) {
                 for (int i = 0; i < 100; i++) {
                     assertFalse(grid(g).affinity(DYNAMIC_CACHE_NAME).mapKeyToPrimaryAndBackups(i)
-                            .contains(grid(nodeCount()).cluster().localNode()));
+                        .contains(grid(nodeCount()).cluster().localNode()));
 
                     assertFalse(grid(g).affinity(DYNAMIC_CACHE_NAME).mapKeyToPrimaryAndBackups(i)
-                            .contains(grid(nodeCount() + 1).cluster().localNode()));
+                        .contains(grid(nodeCount() + 1).cluster().localNode()));
                 }
             }
 
             // Check that cache is not deployed on new node after undeploy.
             for (int g = 0; g < nodeCount() + 2; g++) {
-                final IgniteKernal kernal0 = (IgniteKernal) grid(g);
+                final IgniteKernal kernal0 = (IgniteKernal)grid(g);
 
                 for (IgniteInternalFuture f : kernal0.context().cache().context().exchange().exchangeFutures())
                     f.get();
@@ -455,7 +554,7 @@
                     }, IllegalArgumentException.class, null);
             }
 
-            kernal.context().cache().dynamicDestroyCache(DYNAMIC_CACHE_NAME, true).get();
+            kernal.destroyCache(DYNAMIC_CACHE_NAME);
 
             stopGrid(nodeCount() + 1);
             stopGrid(nodeCount());
@@ -489,6 +588,36 @@
     /**
      * @throws Exception If failed.
      */
+    public void testFailWhenOneOfConfiguredCacheExists() throws Exception {
+        GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                final Ignite kernal = grid(0);
+
+                CacheConfiguration ccfgDynamic = new CacheConfiguration();
+                ccfgDynamic.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+
+                ccfgDynamic.setName(DYNAMIC_CACHE_NAME);
+
+                ccfgDynamic.setNodeFilter(NODE_FILTER);
+
+                CacheConfiguration ccfgStatic = new CacheConfiguration();
+                ccfgStatic.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+
+                // Cache is already configured, should fail.
+                ccfgStatic.setName(STATIC_CACHE_NAME);
+
+                ccfgStatic.setNodeFilter(NODE_FILTER);
+
+                return kernal.createCaches(F.asList(ccfgDynamic, ccfgStatic));
+            }
+        }, CacheExistsException.class, null);
+
+        assertNull(grid(0).cache(DYNAMIC_CACHE_NAME));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
     public void testClientCache() throws Exception {
         try {
             testAttribute = false;
@@ -522,7 +651,7 @@
             for (int g = 0; g < nodeCount() + 1; g++)
                 assertEquals("1", ignite(g).cache(DYNAMIC_CACHE_NAME).get("1"));
 
-            kernal.context().cache().dynamicDestroyCache(DYNAMIC_CACHE_NAME, true).get();
+            kernal.destroyCache(DYNAMIC_CACHE_NAME);
         }
         finally {
             stopGrid(nodeCount());
@@ -547,7 +676,7 @@
 
             ccfg.setNodeFilter(NODE_FILTER);
 
-            final IgniteKernal started = (IgniteKernal) grid(nodeCount());
+            final IgniteKernal started = (IgniteKernal)grid(nodeCount());
 
             started.createCache(ccfg);
 
@@ -564,14 +693,13 @@
             for (int g = 0; g < nodeCount() + 1; g++)
                 assertEquals("1", ignite(g).cache(DYNAMIC_CACHE_NAME).get("1"));
 
-            kernal.context().cache().dynamicDestroyCache(DYNAMIC_CACHE_NAME, true).get();
+            kernal.destroyCache(DYNAMIC_CACHE_NAME);
         }
         finally {
             stopGrid(nodeCount());
         }
     }
 
-
     /**
      * @throws Exception If failed.
      */
@@ -610,7 +738,7 @@
             for (int g = 0; g < nodeCount() + 1; g++)
                 assertEquals("1", ignite(g).cache(DYNAMIC_CACHE_NAME).get("1"));
 
-            kernal.context().cache().dynamicDestroyCache(DYNAMIC_CACHE_NAME, true).get();
+            kernal.destroyCache(DYNAMIC_CACHE_NAME);
         }
         finally {
             stopGrid(nodeCount());
@@ -760,7 +888,7 @@
                 nearGrid.getOrCreateNearCache(DYNAMIC_CACHE_NAME, new NearCacheConfiguration());
 
                 GridCacheContext<Object, Object> nCtx = ((IgniteKernal)nearGrid)
-                        .internalCache(DYNAMIC_CACHE_NAME).context();
+                    .internalCache(DYNAMIC_CACHE_NAME).context();
 
                 assertTrue(nCtx.isNear());
                 assertFalse(nCtx.affinityNode());
@@ -771,11 +899,12 @@
                 clientGrid.getOrCreateCache(cfg);
 
                 GridCacheContext<Object, Object> cCtx = ((IgniteKernal)clientGrid)
-                        .internalCache(DYNAMIC_CACHE_NAME).context();
+                    .internalCache(DYNAMIC_CACHE_NAME).context();
 
                 assertFalse(cCtx.isNear());
                 assertFalse(cCtx.affinityNode());
-            } finally {
+            }
+            finally {
                 stopGrid(nodeCount() + 1);
                 stopGrid(nodeCount());
             }
@@ -785,6 +914,40 @@
         }
     }
 
+    /** @throws Exception If failed. */
+    public void testGetOrCreateCollection() throws Exception {
+        final int cacheCnt = 3;
+
+        try {
+            final Collection<CacheConfiguration> ccfgs = new ArrayList<>();
+
+            for (int i = 0; i < cacheCnt; i++) {
+                final CacheConfiguration cfg = new CacheConfiguration();
+
+                cfg.setName(DYNAMIC_CACHE_NAME + Integer.toString(i));
+                cfg.setNodeFilter(NODE_FILTER);
+
+                ccfgs.add(cfg);
+
+                grid(0).getOrCreateCaches(ccfgs);
+            }
+
+            for (int i = 0; i < cacheCnt; i++) {
+                assertNotNull(grid(0).cache(DYNAMIC_CACHE_NAME + Integer.toString(i)));
+
+                IgniteCache<Object, Object> jcache = grid(0).cache(DYNAMIC_CACHE_NAME + Integer.toString(i));
+
+                jcache.put(Integer.toString(i), Integer.toString(i));
+
+                assertEquals(jcache.get(Integer.toString(i)), Integer.toString(i));
+            }
+        }
+        finally {
+            for (int i = 0; i < cacheCnt; i++)
+                grid(0).destroyCache(DYNAMIC_CACHE_NAME + Integer.toString(i));
+        }
+    }
+
     /**
      * @throws Exception If failed.
      */
@@ -813,7 +976,7 @@
             assertNull(err.get());
 
             for (int i = 0; i < nodeCount(); i++) {
-                GridCacheContext<Object, Object> ctx = ((IgniteKernal) ignite(i)).internalCache(DYNAMIC_CACHE_NAME)
+                GridCacheContext<Object, Object> ctx = ((IgniteKernal)ignite(i)).internalCache(DYNAMIC_CACHE_NAME)
                     .context();
 
                 assertTrue(ctx.affinityNode());
@@ -906,7 +1069,7 @@
                 assertNull(err.get());
 
                 for (int i = 0; i < nodeCount(); i++) {
-                    GridCacheContext<Object, Object> ctx = ((IgniteKernal) ignite(i)).internalCache(DYNAMIC_CACHE_NAME)
+                    GridCacheContext<Object, Object> ctx = ((IgniteKernal)ignite(i)).internalCache(DYNAMIC_CACHE_NAME)
                         .context();
 
                     assertTrue(ctx.affinityNode());
@@ -914,7 +1077,7 @@
                 }
 
                 for (int i = 0; i < clientCnt; i++) {
-                    GridCacheContext<Object, Object> ctx = ((IgniteKernal) ignite(nodeCount() + i))
+                    GridCacheContext<Object, Object> ctx = ((IgniteKernal)ignite(nodeCount() + i))
                         .internalCache(DYNAMIC_CACHE_NAME).context();
 
                     assertFalse(ctx.affinityNode());
@@ -995,12 +1158,12 @@
         for (int i = 0; i < nodeCount(); i++) {
             final int idx = i;
 
-                latches[i] = new CountDownLatch(1);
-                lsnrs[i] = new IgnitePredicate<CacheEvent>() {
-                    @Override public boolean apply(CacheEvent e) {
-                        switch (e.type()) {
-                            case EventType.EVT_CACHE_NODES_LEFT:
-                                latches[idx].countDown();
+            latches[i] = new CountDownLatch(1);
+            lsnrs[i] = new IgnitePredicate<CacheEvent>() {
+                @Override public boolean apply(CacheEvent e) {
+                    switch (e.type()) {
+                        case EventType.EVT_CACHE_NODES_LEFT:
+                            latches[idx].countDown();
 
                             break;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WithKeepBinaryCacheFullApiTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WithKeepBinaryCacheFullApiTest.java
index 1954a8d..3e6b0b0 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WithKeepBinaryCacheFullApiTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WithKeepBinaryCacheFullApiTest.java
@@ -459,11 +459,7 @@
 
         for (TransactionConcurrency conc : TransactionConcurrency.values()) {
             for (TransactionIsolation isolation : TransactionIsolation.values()) {
-                // TODO IGNITE-2971: delete this if when the issue will be fixed.
-                if (conc == TransactionConcurrency.OPTIMISTIC && isolation == TransactionIsolation.SERIALIZABLE)
-                    continue;
-
-                info(">>>>> Executing test using explicite txs [concurrency=" + conc + ", isolation=" + isolation + "]");
+                info(">>>>> Executing test using explicit txs [concurrency=" + conc + ", isolation=" + isolation + "]");
 
                 checkInvokeTx(conc, isolation);
 
@@ -671,10 +667,6 @@
 
         for (TransactionConcurrency conc : TransactionConcurrency.values()) {
             for (TransactionIsolation isolation : TransactionIsolation.values()) {
-                // TODO IGNITE-2971: delete this if when the issue will be fixed.
-                if (conc == TransactionConcurrency.OPTIMISTIC && isolation == TransactionIsolation.SERIALIZABLE)
-                    continue;
-
                 checkInvokeAsyncTx(conc, isolation);
 
                 jcache().removeAll();
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheQueueApiSelfAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheQueueApiSelfAbstractTest.java
index f9499a1..93d0989 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheQueueApiSelfAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheQueueApiSelfAbstractTest.java
@@ -534,6 +534,72 @@
     }
 
     /**
+     * JUnit.
+     *
+     * @throws Exception If failed.
+     */
+    public void testPutRemovePeekPollUnbounded() throws Exception {
+        // Random queue name.
+        String queueName = UUID.randomUUID().toString();
+
+        IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));
+
+        for (int i = 0; i < QUEUE_CAPACITY; i++)
+            queue.put("Item-" + i);
+
+        assertEquals(QUEUE_CAPACITY, queue.size());
+
+        queue.remove("Item-1");
+
+        assertEquals(QUEUE_CAPACITY - 1, queue.size());
+
+        assertEquals("Item-0", queue.peek());
+        assertEquals("Item-0", queue.poll());
+        assertEquals("Item-2", queue.poll());
+
+        assertEquals(0, queue.size());
+
+        queue.clear();
+
+        assertTrue(queue.isEmpty());
+    }
+
+    /**
+     * JUnit.
+     *
+     * @throws Exception If failed.
+     */
+    public void testRemovePeek() throws Exception {
+        // Random queue name.
+        String queueName = UUID.randomUUID().toString();
+
+        IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));
+
+        for (int i = 0; i < 5; i++)
+            queue.put("Item-" + i);
+
+        queue.remove("Item-1");
+
+        assertEquals("Item-0", queue.peek());
+
+        queue.remove("Item-2");
+
+        assertEquals("Item-0", queue.peek());
+
+        queue.remove("Item-0");
+
+        assertEquals("Item-3", queue.peek());
+
+        queue.remove("Item-4");
+
+        assertEquals("Item-3", queue.peek());
+
+        queue.remove("Item-3");
+
+        assertNull(queue.peek());
+    }
+
+    /**
      * @throws Exception If failed.
      */
     public void testReuseCache() throws Exception {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/SemaphoreFailoverSafeReleasePermitsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/SemaphoreFailoverSafeReleasePermitsTest.java
new file mode 100644
index 0000000..241253d
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/SemaphoreFailoverSafeReleasePermitsTest.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.datastructures;
+
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteSemaphore;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.configuration.AtomicConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+
+/**
+ * Tests that semaphore permits are released back when the owner node leaves, in failover-safe mode.
+ */
+public class SemaphoreFailoverSafeReleasePermitsTest extends GridCommonAbstractTest {
+    /** */
+    protected static TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
+
+    /** Grid count. */
+    private static final int GRID_CNT = 3;
+
+    /** Atomics cache mode. */
+    private CacheMode atomicsCacheMode;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        TcpDiscoverySpi spi = new TcpDiscoverySpi();
+
+        spi.setIpFinder(ipFinder);
+
+        cfg.setDiscoverySpi(spi);
+
+        AtomicConfiguration atomicCfg = atomicConfiguration();
+
+        assertNotNull(atomicCfg);
+
+        cfg.setAtomicConfiguration(atomicCfg);
+
+        return cfg;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testReleasePermitsPartitioned() throws Exception {
+        atomicsCacheMode = PARTITIONED;
+
+        doTest();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testReleasePermitsReplicated() throws Exception {
+        atomicsCacheMode = REPLICATED;
+
+        doTest();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void doTest() throws Exception {
+        try {
+            startGrids(GRID_CNT);
+
+            Ignite ignite = grid(0);
+
+            IgniteSemaphore sem = ignite.semaphore("sem", 1, true, true);
+
+            assertEquals(1, sem.availablePermits());
+
+            sem.acquire(1);
+
+            assertEquals(0, sem.availablePermits());
+
+            ignite.close();
+
+            awaitPartitionMapExchange();
+
+            ignite = grid(1);
+
+            sem = ignite.semaphore("sem", 1, true, true);
+
+            assertTrue(sem.tryAcquire(1, 5000, TimeUnit.MILLISECONDS));
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @return Atomic configuration.
+     */
+    protected AtomicConfiguration atomicConfiguration() {
+        AtomicConfiguration atomicCfg = new AtomicConfiguration();
+
+        atomicCfg.setCacheMode(atomicsCacheMode);
+
+        if (atomicsCacheMode == PARTITIONED)
+            atomicCfg.setBackups(1);
+
+        return atomicCfg;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteCachePartitionedBackupNodeFailureRecoveryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteCachePartitionedBackupNodeFailureRecoveryTest.java
new file mode 100644
index 0000000..6654fd9
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteCachePartitionedBackupNodeFailureRecoveryTest.java
@@ -0,0 +1,193 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
+import javax.cache.processor.EntryProcessor;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheAtomicWriteOrderMode;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.NearCacheConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.cache.IgniteCacheAbstractTest;
+
+import static org.apache.ignite.cache.CacheAtomicWriteOrderMode.PRIMARY;
+import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheRebalanceMode.SYNC;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.PRIMARY_SYNC;
+import static org.apache.ignite.testframework.GridTestUtils.runAsync;
+
+/**
+ */
+public class IgniteCachePartitionedBackupNodeFailureRecoveryTest extends IgniteCacheAbstractTest {
+    /** {@inheritDoc}*/
+    @Override protected int gridCount() {
+        return 3;
+    }
+
+    /** {@inheritDoc}*/
+    @Override protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+
+    /** {@inheritDoc}*/
+    @Override protected CacheAtomicityMode atomicityMode() {
+        return ATOMIC;
+    }
+
+    /** {@inheritDoc}*/
+    @Override protected CacheAtomicWriteOrderMode atomicWriteOrderMode() {
+        return PRIMARY;
+    }
+
+    /** {@inheritDoc}*/
+    @Override protected NearCacheConfiguration nearConfiguration() {
+        return new NearCacheConfiguration();
+    }
+
+    /** {@inheritDoc}*/
+    @Override protected CacheConfiguration cacheConfiguration(String gridName) throws Exception {
+        CacheConfiguration ccfg = super.cacheConfiguration(gridName);
+
+        ccfg.setBackups(1);
+        ccfg.setWriteSynchronizationMode(PRIMARY_SYNC);
+        ccfg.setRebalanceMode(SYNC);
+
+        return ccfg;
+    }
+
+    /**
+     * Test stops and restarts backup node.
+     *
+     * @throws Exception If failed.
+     */
+    public void testBackUpFail() throws Exception {
+        final IgniteEx node1 = grid(0);
+        final IgniteEx node2 = grid(1);
+        final IgniteEx node3 = grid(2);
+
+        awaitPartitionMapExchange();
+
+        final IgniteCache<Integer, Integer> cache1 = node1.cache(null);
+
+        Affinity<Integer> aff = node1.affinity(null);
+
+        Integer key0 = null;
+
+        for (int key = 0; key < 10_000; key++) {
+            if (aff.isPrimary(node2.cluster().localNode(), key) && aff.isBackup(node3.cluster().localNode(), key)) {
+                key0 = key;
+
+                break;
+            }
+        }
+
+        assertNotNull(key0);
+
+        cache1.put(key0, 0);
+
+        final AtomicBoolean finished = new AtomicBoolean();
+
+        final ReentrantLock lock = new ReentrantLock();
+
+        final AtomicInteger cntr = new AtomicInteger();
+
+        final Integer finalKey = key0;
+
+        IgniteInternalFuture<Void> primaryFut;
+        IgniteInternalFuture<Void> backupFut;
+
+        try {
+            primaryFut = runAsync(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    while (!finished.get()) {
+                        lock.lock();
+
+                        try {
+                            cache1.invoke(finalKey, new TestEntryProcessor());
+
+                            cntr.getAndIncrement();
+                        }
+                        finally {
+                            lock.unlock();
+                        }
+                    }
+
+                    return null;
+                }
+            });
+
+            backupFut = runAsync(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    while (!finished.get()) {
+                        stopGrid(2);
+
+                        IgniteEx backUp = startGrid(2);
+
+                        IgniteCache<Integer, Integer> cache3 = backUp.cache(null);
+
+                        lock.lock();
+
+                        try {
+                            Integer backUpVal = cache3.localPeek(finalKey);
+
+                            Integer exp = cntr.get();
+
+                            assertEquals(exp, backUpVal);
+                        }
+                        finally {
+                            lock.unlock();
+                        }
+                    }
+                    return null;
+                }
+            });
+
+            Thread.sleep(30_000);
+        }
+        finally {
+            finished.set(true);
+        }
+
+        primaryFut.get();
+        backupFut.get();
+    }
+
+    /**
+     *
+     */
+    static class TestEntryProcessor implements EntryProcessor<Integer, Integer, Void> {
+        /** {@inheritDoc}*/
+        @Override public Void process(MutableEntry<Integer, Integer> entry, Object... args) {
+            Integer v = entry.getValue() + 1;
+
+            entry.setValue(v);
+
+            return null;
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/CacheNodeSafeAssertion.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/CacheNodeSafeAssertion.java
new file mode 100644
index 0000000..bf6b63f
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/CacheNodeSafeAssertion.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.rebalancing;
+
+import java.util.Collection;
+import java.util.Iterator;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.testframework.assertions.Assertion;
+
+/**
+ * {@link Assertion} that checks that the primary and backup partitions are distributed such that we won't lose any data
+ * if we lose a single node. This implies that the cache in question was configured with a backup count of at least one
+ * and that all partitions are backed up to a different node from the primary.
+ */
+public class CacheNodeSafeAssertion implements Assertion {
+    /** The {@link Ignite} instance. */
+    private final Ignite ignite;
+
+    /** The cache name. */
+    private final String cacheName;
+
+    /**
+     * Construct a new {@link CacheNodeSafeAssertion} for the given {@code cacheName}.
+     *
+     * @param ignite The Ignite instance.
+     * @param cacheName The cache name.
+     */
+    public CacheNodeSafeAssertion(Ignite ignite, String cacheName) {
+        this.ignite = ignite;
+        this.cacheName = cacheName;
+    }
+
+    /**
+     * @return Ignite instance.
+     */
+    protected Ignite ignite() {
+        return ignite;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void test() throws AssertionError {
+        Affinity<?> affinity = ignite.affinity(cacheName);
+
+        int partCnt = affinity.partitions();
+
+        boolean hostSafe = true;
+
+        boolean nodeSafe = true;
+
+        for (int x = 0; x < partCnt; ++x) {
+            // Results are returned with the primary node first and backups after. We want to ensure that there is at
+            // least one backup on a different host.
+            Collection<ClusterNode> results = affinity.mapPartitionToPrimaryAndBackups(x);
+
+            Iterator<ClusterNode> nodes = results.iterator();
+
+            boolean newHostSafe = false;
+
+            boolean newNodeSafe = false;
+
+            if (nodes.hasNext()) {
+                ClusterNode primary = nodes.next();
+
+                // For host safety, get all nodes on the same host as the primary node and ensure at least one of the
+                // backups is on a different host. For node safety, make sure at least one of the backups is not the
+                // primary.
+                Collection<ClusterNode> neighbors = hostSafe ? ignite.cluster().forHost(primary).nodes() : null;
+
+                while (nodes.hasNext()) {
+                    ClusterNode backup = nodes.next();
+
+                    if (hostSafe) {
+                        if (!neighbors.contains(backup))
+                            newHostSafe = true;
+                    }
+
+                    if (nodeSafe) {
+                        if (!backup.equals(primary))
+                            newNodeSafe = true;
+                    }
+                }
+            }
+
+            hostSafe = newHostSafe;
+
+            nodeSafe = newNodeSafe;
+
+            if (!hostSafe && !nodeSafe)
+                break;
+        }
+
+        if (hostSafe)
+            return;
+
+        if (nodeSafe)
+            return;
+
+        throw new AssertionError("Cache " + cacheName + " is endangered!");
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingOrderingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingOrderingTest.java
new file mode 100644
index 0000000..62fc5e9
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingOrderingTest.java
@@ -0,0 +1,916 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.rebalancing;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import javax.cache.processor.EntryProcessor;
+import javax.cache.processor.EntryProcessorException;
+import javax.cache.processor.EntryProcessorResult;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteEvents;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.binary.BinaryObject;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.CachePeekMode;
+import org.apache.ignite.cache.CacheRebalanceMode;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.cache.affinity.AffinityKeyMapped;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.events.CacheEvent;
+import org.apache.ignite.events.CacheRebalancingEvent;
+import org.apache.ignite.events.Event;
+import org.apache.ignite.events.EventType;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.services.Service;
+import org.apache.ignite.services.ServiceConfiguration;
+import org.apache.ignite.services.ServiceContext;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.cache.CacheAtomicWriteOrderMode.PRIMARY;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+
+/**
+ * Test to validate cache and partition events raised from entry processors executing against
+ * partitions that are loading/unloading.
+ * <p>
+ * The test consists of two parts:
+ * <p>
+ * 1. The server side that maintains a map of partition id to the set of keys that belong to
+ * that partition for all partitions (primary + backup) owned by the server. This map
+ * is updated by a local listener registered for the following events:
+ * <ul>
+ *   <li>EVT_CACHE_OBJECT_PUT</li>
+ *   <li>EVT_CACHE_OBJECT_REMOVED</li>
+ *   <li>EVT_CACHE_REBALANCE_OBJECT_LOADED</li>
+ *   <li>EVT_CACHE_REBALANCE_OBJECT_UNLOADED</li>
+ *   <li>EVT_CACHE_REBALANCE_PART_LOADED</li>
+ *   <li>EVT_CACHE_REBALANCE_PART_UNLOADED</li>
+ *   <li>EVT_CACHE_REBALANCE_PART_DATA_LOST</li>
+ * </ul>
+ * 2. The client side that generates a random number of keys for each partition and populates
+ * the cache. When the cache is loaded, each partition has at least one key assigned to it. The
+ * client then issues an {@code invokeAll} on the cache with a key set consisting of one key
+ * belonging to each partition.
+ * <p>
+ * The test makes the following assertions:
+ * <ol>
+ *     <li>EntryProcessors should execute against partitions that are owned/fully loaded.
+ *     If a processor executes against a partition that is partially loaded, the message
+ *     "Key validation requires a retry for partitions" is logged on the client, and
+ *     "Retrying validation for primary partition N due to newly arrived partition..." and
+ *     "Retrying validation for primary partition N due to forming partition..." is logged
+ *     on the server side.</li>
+ *     <li>Events for entries being added/removed and partitions being loaded/unloaded
+ *     should always be delivered to the server nodes that own the partition. If this does
+ *     not happen, the client will log "For primary partition N expected [...], but
+ *     found [...]; missing local keys: []" and "Key validation failed for partitions: [...]".
+ *     The server will log "Retrying validation for primary|backup partition N due to
+ *     forming partition" and "For primary|backup partition N expected [...], but found [...];"
+ *     </li>
+ * </ol>
+ */
+public class GridCacheRebalancingOrderingTest extends GridCommonAbstractTest {
+    /** {@link Random} for test key generation. */
+    private final static Random RANDOM = new Random();
+
+    /** Test cache name. */
+    private static final String TEST_CACHE_NAME = "TestCache";
+
+    /** Flag to configure transactional versus non-transactional cache. */
+    public static final boolean TRANSACTIONAL = false;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        if (isFirstGrid(gridName)) {
+            cfg.setClientMode(true);
+
+            assert cfg.getDiscoverySpi() instanceof TcpDiscoverySpi : cfg.getDiscoverySpi();
+
+            ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setForceServerMode(true);
+        }
+        else
+            cfg.setServiceConfiguration(getServiceConfiguration());
+
+        cfg.setCacheConfiguration(getCacheConfiguration());
+
+        return cfg;
+    }
+
+    /**
+     * @return service configuration for services used in test cluster
+     * @see #getConfiguration()
+     */
+    private ServiceConfiguration getServiceConfiguration() {
+        ServiceConfiguration cfg = new ServiceConfiguration();
+
+        cfg.setName(PartitionObserver.class.getName());
+        cfg.setService(new PartitionObserverService());
+        cfg.setMaxPerNodeCount(1);
+        cfg.setTotalCount(0); // 1 service per node.
+
+        return cfg;
+    }
+
+    /**
+     * @return Cache configuration used by test.
+     * @see #getConfiguration().
+     */
+    protected CacheConfiguration<IntegerKey, Integer> getCacheConfiguration() {
+        CacheConfiguration<IntegerKey, Integer> cfg = new CacheConfiguration<>();
+
+        cfg.setAtomicityMode(TRANSACTIONAL ? CacheAtomicityMode.TRANSACTIONAL : CacheAtomicityMode.ATOMIC);
+        cfg.setCacheMode(CacheMode.PARTITIONED);
+        cfg.setName(TEST_CACHE_NAME);
+        cfg.setAffinity(new RendezvousAffinityFunction(true /* machine-safe */, 271));
+        cfg.setAtomicWriteOrderMode(PRIMARY);
+        cfg.setBackups(1);
+        cfg.setRebalanceMode(CacheRebalanceMode.SYNC);
+        cfg.setWriteSynchronizationMode(FULL_SYNC);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean isMultiJvm() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return 1000 * 60 * 5;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+    /**
+     * Convert the given key from binary form, if necessary.
+     *
+     * @param key the key to convert if necessary
+     * @return the key
+     */
+    private static IntegerKey ensureKey(Object key) {
+        Object converted = key instanceof BinaryObject ? ((BinaryObject) key).deserialize() : key;
+        return converted instanceof IntegerKey ? (IntegerKey) converted : null;
+    }
+
+    /**
+     * Determine which of the specified keys (if any) are missing locally from the given cache.
+     *
+     * @param cache The cache to check.
+     * @param exp The expected set of keys.
+     * @return The set of missing keys.
+     */
+    private static Set<IntegerKey> getMissingKeys(IgniteCache<IntegerKey, Integer> cache, Set<IntegerKey> exp) {
+        Set<IntegerKey> missing = new HashSet<>();
+
+        for (IntegerKey key : exp) {
+            if (cache.localPeek(key, CachePeekMode.ALL) == null)
+                missing.add(key);
+        }
+
+        return missing;
+    }
+
+    /**
+     * For an Ignite cache, generate a random {@link IntegerKey} per partition. The number
+     * of partitions is determined by the cache's {@link Affinity}.
+     *
+     * @param ignite Ignite instance.
+     * @param cache  Cache to generate keys for.
+     * @return Map of partition number to randomly generated key.
+     */
+    private Map<Integer, IntegerKey> generateKeysForPartitions(Ignite ignite, IgniteCache<IntegerKey, Integer> cache) {
+        Affinity<IntegerKey> affinity = ignite.affinity(cache.getName());
+
+        int parts = affinity.partitions();
+
+        Map<Integer, IntegerKey> keyMap = new HashMap<>(parts);
+
+        // Draw random keys and file each one under the partition that owns it until every
+        // partition is covered. This single "coupon collector" pass replaces the previous
+        // per-partition rejection loop, which recomputed the affinity of fresh random keys
+        // until one happened to land in the target partition (O(parts) lookups per key).
+        while (keyMap.size() < parts) {
+            IntegerKey key = new IntegerKey(RANDOM.nextInt(10000));
+
+            int part = affinity.partition(key);
+
+            if (!keyMap.containsKey(part))
+                keyMap.put(part, key);
+        }
+
+        // Sanity check: exactly one key per partition, each owned by its own partition.
+        if (keyMap.size() != parts)
+            throw new IllegalStateException("Inconsistent partition count");
+
+        for (int i = 0; i < parts; i++) {
+            IntegerKey key = keyMap.get(i);
+
+            if (affinity.partition(key) != i)
+                throw new IllegalStateException("Inconsistent partition");
+        }
+
+        return keyMap;
+    }
+
+    /**
+     * Starts background thread that launches servers. This method will block
+     * until at least one server is running.
+     *
+     * @return {@link ServerStarter} runnable that starts servers
+     * @throws Exception If failed.
+     */
+    private ServerStarter startServers() throws Exception {
+        ServerStarter starter = new ServerStarter();
+
+        // Run the starter on a named daemon thread so it cannot keep the JVM alive.
+        Thread thread = new Thread(starter, "Server Starter");
+
+        thread.setDaemon(true);
+        thread.start();
+
+        // Block until the starter reports that the first server is up.
+        starter.waitForServerStart();
+
+        return starter;
+    }
+
+    /**
+     * Continuously validates, while servers are started in the background, that every
+     * partition of the test cache holds exactly the set of keys that were put into it.
+     *
+     * @throws Exception If failed.
+     */
+    public void testEvents() throws Exception {
+        Ignite ignite = startGrid(0);
+
+        ServerStarter srvStarter = startServers();
+
+        IgniteCache<IntegerKey, Integer> cache = ignite.cache(TEST_CACHE_NAME);
+
+        // Generate a key per partition.
+        Map<Integer, IntegerKey> keyMap = generateKeysForPartitions(ignite, cache);
+
+        // Populate a random number of keys per partition.
+        Map<Integer, Set<IntegerKey>> partMap = new HashMap<>(keyMap.size());
+
+        for (Map.Entry<Integer, IntegerKey> entry : keyMap.entrySet()) {
+            Integer part = entry.getKey();
+            int affinity = entry.getValue().getKey();
+            int cnt = RANDOM.nextInt(10) + 1;
+
+            Set<IntegerKey> keys = new HashSet<>(cnt);
+
+            for (int i = 0; i < cnt; i++) {
+                // All keys share the partition's affinity value so they land in the same partition.
+                IntegerKey key = new IntegerKey(RANDOM.nextInt(10000), affinity);
+                keys.add(key);
+                cache.put(key, RANDOM.nextInt());
+            }
+
+            partMap.put(part, keys);
+        }
+
+        // Display the partition map.
+        X.println("Partition Map:");
+
+        for (Map.Entry<Integer, Set<IntegerKey>> entry : partMap.entrySet())
+            X.println(entry.getKey() + ": " + entry.getValue());
+
+        // Validate keys across all partitions.
+        Affinity<IntegerKey> affinity = ignite.affinity(cache.getName());
+
+        // One validator per partition, addressed via that partition's representative key.
+        Map<IntegerKey, KeySetValidator> validatorMap = new HashMap<>(partMap.size());
+
+        for (Map.Entry<Integer, Set<IntegerKey>> partEntry : partMap.entrySet()) {
+            Integer part = partEntry.getKey();
+
+            validatorMap.put(keyMap.get(part), new KeySetValidator(partEntry.getValue()));
+        }
+
+        int i = 0;
+
+        // Re-run validation until the background starter finishes; partitions keep moving
+        // the whole time as new servers join.
+        while (!srvStarter.isDone()) {
+            Map<IntegerKey, EntryProcessorResult<KeySetValidator.Result>> results = cache.invokeAll(validatorMap);
+
+            Set<Integer> failures = new HashSet<>();
+            Set<Integer> retries = new HashSet<>();
+
+            for (Map.Entry<IntegerKey, EntryProcessorResult<KeySetValidator.Result>> result : results.entrySet()) {
+                try {
+                    if (result.getValue().get() == KeySetValidator.Result.RETRY)
+                        retries.add(affinity.partition(result.getKey()));
+                }
+                catch (Exception e) {
+                    // EntryProcessorResult.get() rethrows the validator's EntryProcessorException.
+                    X.println("!!! " + e.getMessage());
+                    e.printStackTrace();
+                    failures.add(affinity.partition(result.getKey()));
+                }
+            }
+
+            if (!failures.isEmpty()) {
+                X.println("*** Key validation failed for partitions: " + failures);
+                fail("https://issues.apache.org/jira/browse/IGNITE-3456");
+            }
+            else if (!retries.isEmpty())
+                // Transient state (initial sync or a partition still loading) - try again below.
+                // Note: the former retries.clear() here was dead code (loop-local set).
+                X.println("*** Key validation requires a retry for partitions: " + retries);
+            else
+                X.println("*** Key validation was successful: " + i);
+
+            i++;
+
+            Thread.sleep(500);
+        }
+    }
+
+    /**
+     * EntryProcessor that validates that the partition associated with the targeted key has a specified set of keys.
+     */
+    public static class KeySetValidator implements EntryProcessor<IntegerKey, Integer, KeySetValidator.Result> {
+        /** Keys expected to reside in the partition that owns the targeted key. */
+        private final Set<IntegerKey> keys;
+
+        /**
+         * Create a new KeySetValidator.
+         *
+         * @param keys the expected keys belonging to the partition that owns the targeted key
+         */
+        KeySetValidator(Set<IntegerKey> keys) {
+            if (keys == null)
+                throw new IllegalArgumentException();
+
+            this.keys = keys;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Result process(MutableEntry<IntegerKey, Integer> entry, Object... objects) {
+            try {
+                Ignite ignite = entry.unwrap(Ignite.class);
+
+                // The deployed PartitionObserver service tracks, via cache events, which keys
+                // each local partition currently holds.
+                PartitionObserver observer = ignite.services().service(PartitionObserver.class.getName());
+
+                assertNotNull(observer);
+
+                IgniteCache<IntegerKey, Integer> cache = ignite.cache(TEST_CACHE_NAME);
+
+                Affinity<IntegerKey> affinity = ignite.affinity(TEST_CACHE_NAME);
+
+                Set<IntegerKey> exp = this.keys;
+
+                Set<IntegerKey> missing = getMissingKeys(cache, exp);
+
+                IntegerKey key = entry.getKey();
+
+                int part = affinity.partition(key);
+
+                String ownership = affinity.isPrimary(ignite.cluster().localNode(), key) ? "primary" : "backup";
+
+                // Wait for the local listener to sync past events.
+                if (!observer.getIgniteLocalSyncListener().isSynced()) {
+                    ignite.log().info("Retrying validation for " + ownership + " partition " + part
+                            + " due to initial sync");
+
+                    return Result.RETRY;
+                }
+
+                // Determine if the partition is being loaded and wait for it to load completely.
+                if (observer.getLoadingMap().containsKey(part)) {
+                    ignite.log().info("Retrying validation due to forming partition [ownership=" + ownership +
+                        ", partition=" + part +
+                        ", expKeys=" + exp +
+                        ", loadedKeys=" + observer.getLoadingMap().get(part) +
+                        ", missingLocalKeys=" + missing + ']');
+
+                    return Result.RETRY;
+                }
+
+                // Partition not yet observed at all - it just arrived on this node; retry later.
+                if (!observer.getPartitionMap().containsKey(part)) {
+                    ignite.log().info("Retrying validation due to newly arrived partition [ownership=" + ownership +
+                        ", partition=" + part +
+                        ", missingLocalKeys=" + missing + ']');
+
+                    return Result.RETRY;
+                }
+
+                // Validate the key count.
+                Set<IntegerKey> curr = observer.ensureKeySet(part);
+
+                if (curr.equals(exp) && missing.isEmpty())
+                    return Result.OK;
+
+                // Mismatch: report via exception so the caller records a validation failure.
+                String msg = String.format("For %s partition %s:\n\texpected  %s,\n\t" +
+                    "but found %s;\n\tmissing local keys: %s",
+                    ownership, part, new TreeSet<>(exp), new TreeSet<>(curr), new TreeSet<>(missing));
+
+                ignite.log().info(">>> " + msg);
+
+                throw new EntryProcessorException(msg);
+            }
+            catch (NullPointerException e) {
+                // Print the stack trace before Ignite wraps the exception, then rethrow.
+                e.printStackTrace();
+
+                throw e;
+            }
+        }
+
+        /**
+         * Outcome of a validation attempt.
+         */
+        enum Result {
+            /** Partition contents matched the expected key set. */
+            OK,
+            /** Validation hit a transient state and should be retried. */
+            RETRY
+        }
+    }
+
+    /**
+     * Integer value that can optionally be associated with another integer.
+     * <p>
+     * NOTE(review): {@code equals()}/{@code hashCode()} consider only {@code val}, while
+     * {@code compareTo()} orders by {@code affinity} first, so the ordering is not consistent
+     * with equals. This looks intentional for this test (deduplicate by value, sort by
+     * affinity) - confirm before reusing this class elsewhere.
+     */
+    public static class IntegerKey implements Comparable<IntegerKey> {
+        /**
+         * The integer key value.
+         */
+        private final int val;
+
+        /**
+         * The optional associated integer. Marked {@link AffinityKeyMapped} so Ignite maps
+         * the key to a partition by this value rather than by the whole key.
+         */
+        @AffinityKeyMapped
+        private final Integer affinity;
+
+        /**
+         * Create a new IntegerKey for the given integer value.
+         *
+         * @param val the integer key value
+         */
+        IntegerKey(int val) {
+            this.val = val;
+            this.affinity = val;
+        }
+
+        /**
+         * Create a new IntegerKey for the given integer value that is associated with the specified integer.
+         *
+         * @param val the integer key value
+         * @param affinity the associated integer
+         */
+        IntegerKey(int val, int affinity) {
+            this.val = val;
+            this.affinity = affinity;
+        }
+
+        /**
+         * Return the integer key value.
+         *
+         * @return the integer key value
+         */
+        public int getKey() {
+            return this.val;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return this.val;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (o == this)
+                return true;
+            if (o == null)
+                return false;
+
+            // Exact class match only; affinity is deliberately ignored here (see class note).
+            if (IntegerKey.class.equals(o.getClass())) {
+                IntegerKey that = (IntegerKey) o;
+                return this.val == that.val;
+            }
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            int val = this.val;
+            Integer affinity = this.affinity;
+
+            // Short form when the affinity is just the default (the value itself).
+            if (val == affinity)
+                return String.valueOf(val);
+
+            return "IntKey [val=" + val + ", aff=" + affinity + ']';
+        }
+
+        /** {@inheritDoc} */
+        @Override public int compareTo(final IntegerKey that) {
+            // Order by affinity (partition grouping) first, then by key value.
+            int i = this.affinity.compareTo(that.affinity);
+
+            if (i == 0)
+                i = Integer.compare(this.getKey(), that.getKey());
+
+            return i;
+        }
+    }
+
+    /**
+     * Local listener wrapper that brings a delegate listener up to date with the latest events.
+     */
+    private static class IgniteLocalSyncListener implements IgnitePredicate<Event> {
+        /** Listener that actually processes events. */
+        private final IgnitePredicate<Event> delegate;
+
+        /** Event types to listen for. */
+        private final int[] causes;
+
+        /** {@code true} once past events have been replayed to the delegate. */
+        private volatile boolean isSynced;
+
+        /** Local order of the last event replayed during sync; used to skip duplicates. */
+        private volatile long syncedId = Long.MIN_VALUE;
+
+        /**
+         * @param delegate Event listener.
+         * @param causes Event types to listen.
+         */
+        IgniteLocalSyncListener(IgnitePredicate<Event> delegate, int... causes) {
+            this.delegate = delegate;
+            this.causes = causes;
+        }
+
+        /**
+         * @return Local ignite.
+         */
+        protected Ignite ignite() {
+            return Ignition.localIgnite();
+        }
+
+        /**
+         * Registers the delegate for live events, then replays events recorded before
+         * registration so none are missed.
+         */
+        public void register() {
+            ignite().events().localListen(this.delegate, this.causes);
+
+            sync();
+        }
+
+        /**
+         * Replays previously recorded local events to the delegate exactly once
+         * (double-checked locking on the volatile {@code isSynced} flag).
+         */
+        public void sync() {
+            if (!this.isSynced) {
+                synchronized (this) {
+                    if (!this.isSynced) {
+                        // Query all locally recorded events of the listened-for types.
+                        Collection<Event> evts = ignite().events().localQuery(new IgnitePredicate<Event>() {
+                                @Override public boolean apply(final Event evt) {
+                                    return true;
+                                }
+                            },
+                            this.causes);
+
+                        for (Event event : evts) {
+                            // Events returned from localQuery() are ordered by increasing local ID. Update the sync ID
+                            // within a finally block to avoid applying duplicate events if the delegate listener
+                            // throws an exception while processing the event.
+                            try {
+                                applyInternal(event);
+                            }
+                            finally {
+                                this.syncedId = event.localOrder();
+                            }
+                        }
+
+                        this.isSynced = true;
+
+                        notifyAll();
+                    }
+                }
+            }
+        }
+
+        /**
+         * @return Synced flag.
+         */
+        boolean isSynced() {
+            return isSynced;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean apply(Event evt) {
+            sync();
+
+            return applyInternal(evt);
+        }
+
+        /**
+         * @param evt Event.
+         * @return See {@link IgniteEvents#localListen}.
+         */
+        boolean applyInternal(Event evt) {
+            // Avoid applying previously recorded events.
+            if (evt.localOrder() > this.syncedId) {
+                try {
+                    return this.delegate.apply(evt);
+                }
+                catch (Exception e) {
+                    // Log delegate failures and report false rather than propagating.
+                    e.printStackTrace();
+
+                    return false;
+                }
+            }
+
+            return true;
+        }
+    }
+
+    /**
+     * Service interface for server side partition observation.
+     */
+    interface PartitionObserver {
+        /**
+         * @return Map of partitions to the keys belonging to that partition.
+         */
+        ConcurrentMap<Integer, Set<IntegerKey>> getPartitionMap();
+
+        /**
+         * @return Map of partitions that are in the process of loading and the current keys that belong to that partition.
+         * Currently it seems that an EntryProcessor is not guaranteed to have a "stable" view of a partition and
+         * can see entries as they are being loaded into the partition, so we must batch these events up in the map
+         * and update the {@link #getPartitionMap() partition map} atomically once the partition has been fully loaded.
+         */
+        ConcurrentMap<Integer, Set<IntegerKey>> getLoadingMap();
+
+        /**
+         * Ensure that the {@link #getPartitionMap() partition map} has a set of keys associated with the given
+         * partition, creating one if it doesn't already exist.
+         *
+         * @param part the partition
+         * @return the set for the given partition
+         */
+        Set<IntegerKey> ensureKeySet(int part);
+
+        /**
+         * @return Listener wrapper that brings a delegate listener up to date with the latest events.
+         */
+        IgniteLocalSyncListener getIgniteLocalSyncListener();
+    }
+
+    /**
+     * {@link PartitionObserver} implementation deployed as an Ignite {@link Service}. Maintains,
+     * via local cache events, the set of keys held by each local partition of the test cache.
+     */
+    private static class PartitionObserverService implements Service, PartitionObserver, Serializable {
+        /** Keys currently held by each fully-loaded local partition. */
+        private final ConcurrentMap<Integer, Set<IntegerKey>> partMap = new ConcurrentHashMap<>();
+
+        /** Keys seen so far for partitions that are still being rebalanced in. */
+        private final ConcurrentMap<Integer, Set<IntegerKey>> loadingMap = new ConcurrentHashMap<>();
+
+        /** Event listener that maintains the two maps above. */
+        private final IgnitePredicate<Event> pred = new IgnitePredicate<Event>() {
+            @Override public boolean apply(Event evt) {
+                // Handle:
+                // EVT_CACHE_OBJECT_PUT
+                // EVT_CACHE_REBALANCE_OBJECT_LOADED
+                // EVT_CACHE_OBJECT_REMOVED
+                // EVT_CACHE_REBALANCE_OBJECT_UNLOADED
+                if (evt instanceof CacheEvent) {
+                    CacheEvent cacheEvt = (CacheEvent) evt;
+                    int part = cacheEvt.partition();
+
+                    // Only handle events for the test cache.
+                    if (TEST_CACHE_NAME.equals(cacheEvt.cacheName())) {
+                        switch (evt.type()) {
+                            case EventType.EVT_CACHE_OBJECT_PUT: {
+                                ensureKeySet(part).add(ensureKey(cacheEvt.key()));
+                                break;
+                            }
+
+                            case EventType.EVT_CACHE_REBALANCE_OBJECT_LOADED: {
+                                // Batch up objects that are being loaded.
+                                ensureKeySet(part, loadingMap).add(ensureKey(cacheEvt.key()));
+                                break;
+                            }
+
+                            case EventType.EVT_CACHE_OBJECT_REMOVED:
+                            case EventType.EVT_CACHE_REBALANCE_OBJECT_UNLOADED: {
+                                ensureKeySet(part).remove(ensureKey(cacheEvt.key()));
+                                break;
+                            }
+                        }
+                    }
+                }
+                // Handle:
+                // EVT_CACHE_REBALANCE_PART_LOADED
+                // EVT_CACHE_REBALANCE_PART_UNLOADED
+                // EVT_CACHE_REBALANCE_PART_DATA_LOST
+                else if (evt instanceof CacheRebalancingEvent) {
+                    CacheRebalancingEvent rebalancingEvt = (CacheRebalancingEvent) evt;
+
+                    int part = rebalancingEvt.partition();
+
+                    // Only handle events for the test cache.
+                    if (TEST_CACHE_NAME.equals(rebalancingEvt.cacheName())) {
+                        switch (evt.type()) {
+                            case EventType.EVT_CACHE_REBALANCE_PART_UNLOADED: {
+                                Set<IntegerKey> keys = partMap.get(part);
+
+                                if (keys != null && !keys.isEmpty())
+                                    X.println("!!! Attempting to unload non-empty partition: " + part + "; keys=" + keys);
+
+                                partMap.remove(part);
+
+                                X.println("*** Unloaded partition: " + part);
+
+                                break;
+                            }
+
+                            case EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST: {
+                                partMap.remove(part);
+
+                                X.println("*** Lost partition: " + part);
+
+                                break;
+                            }
+
+                            case EventType.EVT_CACHE_REBALANCE_PART_LOADED: {
+                                // Atomically update the key count for the new partition.
+                                Set<IntegerKey> keys = loadingMap.remove(part);
+
+                                // An empty partition produces no OBJECT_LOADED events, so there may
+                                // be no batched set. ConcurrentHashMap rejects null values, so
+                                // substitute an empty set instead of letting put() throw an NPE
+                                // (which applyInternal() would swallow, silently breaking tracking).
+                                if (keys == null)
+                                    keys = new CopyOnWriteArraySet<IntegerKey>();
+
+                                partMap.put(part, keys);
+
+                                X.println("*** Loaded partition: " + part + "; keys=" + keys);
+
+                                break;
+                            }
+                        }
+                    }
+                }
+
+                return true;
+            }
+        };
+
+        /** Wrapper that replays past events before applying new ones. */
+        private final IgniteLocalSyncListener lsnr = new IgniteLocalSyncListener(pred,
+            EventType.EVT_CACHE_OBJECT_PUT,
+            EventType.EVT_CACHE_OBJECT_REMOVED,
+            EventType.EVT_CACHE_REBALANCE_OBJECT_LOADED,
+            EventType.EVT_CACHE_REBALANCE_OBJECT_UNLOADED,
+            EventType.EVT_CACHE_REBALANCE_PART_LOADED,
+            EventType.EVT_CACHE_REBALANCE_PART_UNLOADED,
+            EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST);
+
+        /** {@inheritDoc} */
+        @Override public ConcurrentMap<Integer, Set<IntegerKey>> getPartitionMap() {
+            return partMap;
+        }
+
+        /** {@inheritDoc} */
+        @Override public ConcurrentMap<Integer, Set<IntegerKey>> getLoadingMap() {
+            return loadingMap;
+        }
+
+        /** {@inheritDoc} */
+        @Override public IgniteLocalSyncListener getIgniteLocalSyncListener() {
+            return lsnr;
+        }
+
+        /**
+         * Ensure that the partition map has a set of keys associated with the given partition,
+         * creating one if it doesn't already exist.
+         *
+         * @param part the partition
+         * @return the set for the given partition
+         */
+        public Set<IntegerKey> ensureKeySet(final int part) {
+            return ensureKeySet(part, partMap);
+        }
+
+        /**
+         * Ensure that the given partition map has a set of keys associated with the given partition, creating one if it
+         * doesn't already exist.
+         *
+         * @param part the partition
+         * @param map the partition map
+         *
+         * @return the set for the given partition
+         */
+        Set<IntegerKey> ensureKeySet(final int part, final ConcurrentMap<Integer, Set<IntegerKey>> map) {
+            Set<IntegerKey> keys = map.get(part);
+
+            if (keys == null) {
+                // putIfAbsent keeps a concurrently-installed set if another thread won the race.
+                map.putIfAbsent(part, new CopyOnWriteArraySet<IntegerKey>());
+
+                keys = map.get(part);
+            }
+
+            return keys;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void cancel(final ServiceContext ctx) {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
+        @Override public void init(final ServiceContext ctx) throws Exception {
+            this.lsnr.register();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void execute(final ServiceContext ctx) throws Exception {
+            // No-op.
+        }
+    }
+
+    /**
+     * Runnable that starts {@value #SERVER_COUNT} servers. This runnable starts
+     * servers every {@value #START_DELAY} milliseconds. The staggered start is intended
+     * to allow partitions to move every time a new server is started.
+     */
+    private class ServerStarter implements Runnable {
+        /** Number of servers to start. */
+        static final int SERVER_COUNT = 10;
+
+        /** Delay between server starts, in milliseconds. */
+        static final int START_DELAY = 2000;
+
+        /** Set once the starter has finished, whether normally or due to an error. */
+        private volatile boolean done;
+
+        /** Released once the first server has started and the partition map has settled. */
+        private final CountDownLatch started = new CountDownLatch(1);
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            try {
+                for (int i = 1; i <= SERVER_COUNT; i++) {
+                    startGrid(i);
+
+                    Thread.sleep(START_DELAY);
+
+                    awaitPartitionMapExchange();
+
+                    // Latch has a count of one: only the first countDown() has any effect.
+                    started.countDown();
+                }
+            }
+            catch (Exception e) {
+                e.printStackTrace();
+
+                X.println("Shutting down server starter thread");
+            }
+            finally {
+                done = true;
+            }
+        }
+
+        /**
+         * Blocks the executing thread until at least one server has started.
+         *
+         * @throws InterruptedException If interrupted.
+         */
+        void waitForServerStart() throws InterruptedException {
+            // await() returns false on timeout; fail fast instead of silently proceeding
+            // with no server started (the old code ignored the return value).
+            if (!started.await(getTestTimeout(), TimeUnit.MILLISECONDS))
+                throw new IllegalStateException("Timed out waiting for the first server to start.");
+        }
+
+        /** @return {@code true} once the starter has finished (all servers started or it failed). */
+        public boolean isDone() {
+            return done;
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingPartitionDistributionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingPartitionDistributionTest.java
new file mode 100644
index 0000000..61ee9ea
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingPartitionDistributionTest.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.rebalancing;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.cache.CacheAtomicWriteOrderMode;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.CacheRebalanceMode;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.testframework.assertions.Assertion;
+import org.apache.ignite.testframework.junits.common.GridRollingRestartAbstractTest;
+
+
+/**
+ * Test the behavior of the partition rebalancing during a rolling restart.
+ */
+public class GridCacheRebalancingPartitionDistributionTest extends GridRollingRestartAbstractTest {
+    /** The maximum allowable deviation from a perfect distribution. */
+    private static final double MAX_DEVIATION = 0.20;
+
+    /** Test cache name. */
+    private static final String CACHE_NAME = "PARTITION_DISTRIBUTION_TEST";
+
+    /** {@inheritDoc} */
+    @Override protected CacheConfiguration<Integer, Integer> getCacheConfiguration() {
+        // 271 partitions with one backup and machine-safe (exclude-neighbors) rendezvous
+        // affinity, so primary and backup copies are kept apart.
+        return new CacheConfiguration<Integer, Integer>(CACHE_NAME)
+                .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
+                .setCacheMode(CacheMode.PARTITIONED)
+                .setBackups(1)
+                .setAffinity(new RendezvousAffinityFunction(true /* machine-safe */, 271))
+                .setAtomicWriteOrderMode(CacheAtomicWriteOrderMode.CLOCK)
+                .setRebalanceMode(CacheRebalanceMode.SYNC)
+                .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+    }
+
+    /**
+     * The test performs rolling restart and checks no server drops out and the partitions are balanced during
+     * redistribution.
+     *
+     * @throws InterruptedException If interrupted while waiting for the restart thread.
+     */
+    public void testRollingRestart() throws InterruptedException {
+        awaitPartitionMapExchange();
+
+        // The restart thread (owned by the base class) performs the restarts; presumably it
+        // runs getRestartCheck()/getRestartAssertion() around each one - see base class.
+        rollingRestartThread.join();
+
+        assertEquals(getMaxRestarts(), rollingRestartThread.getRestartTotal());
+    }
+
+    /** {@inheritDoc} */
+    @Override public int serverCount() {
+        return 5;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMaxRestarts() {
+        return 5;
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgnitePredicate<Ignite> getRestartCheck() {
+        // Gate each restart: proceed only when the full server count is present and every
+        // server owns at least one primary partition.
+        return new IgnitePredicate<Ignite>() {
+            @Override public boolean apply(final Ignite ignite) {
+                Collection<ClusterNode> srvs = ignite.cluster().forServers().nodes();
+
+                if (srvs.size() < serverCount())
+                    return false;
+
+                for (ClusterNode node : srvs) {
+                    int[] primaries = ignite.affinity(CACHE_NAME).primaryPartitions(node);
+
+                    if (primaries == null || primaries.length == 0)
+                        return false;
+                }
+
+                return true;
+            }
+        };
+    }
+
+    /** {@inheritDoc} */
+    @Override public Assertion getRestartAssertion() {
+        return new FairDistributionAssertion();
+    }
+
+    /**
+     * Assertion for {@link RollingRestartThread} to perform prior to each restart to test
+     * the Partition Distribution.
+     */
+    private class FairDistributionAssertion extends CacheNodeSafeAssertion {
+        /** Construct a new FairDistributionAssertion. */
+        public FairDistributionAssertion() {
+            super(grid(0), CACHE_NAME);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void test() throws AssertionError {
+            super.test();
+
+            Affinity<?> affinity = ignite().affinity(CACHE_NAME);
+
+            int partCnt = affinity.partitions();
+
+            // Count how many partitions each node owns (by partition-to-node mapping).
+            Map<ClusterNode, Integer> partMap = new HashMap<>(serverCount());
+
+            for (int i = 0; i < partCnt; i++) {
+                ClusterNode node = affinity.mapPartitionToNode(i);
+
+                int cnt = partMap.containsKey(node) ? partMap.get(node) : 0;
+
+                partMap.put(node, cnt + 1);
+            }
+
+            // Each node's share may deviate from the fair share by at most MAX_DEVIATION (20%).
+            int fairCnt = partCnt / serverCount();
+
+            for (int count : partMap.values()) {
+                double deviation = Math.abs(fairCnt - count) / (double)fairCnt;
+
+                if (deviation > MAX_DEVIATION) {
+                    throw new AssertionError("partition distribution deviation exceeded max: fair count=" + fairCnt
+                            + ", actual count=" + count + ", deviation=" + deviation);
+                }
+            }
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/IgniteCacheContinuousQueryNoUnsubscribeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/IgniteCacheContinuousQueryNoUnsubscribeTest.java
new file mode 100644
index 0000000..d7beb02
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/IgniteCacheContinuousQueryNoUnsubscribeTest.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.query.continuous;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import javax.cache.configuration.FactoryBuilder;
+import javax.cache.event.CacheEntryEvent;
+import javax.cache.event.CacheEntryListenerException;
+import javax.cache.event.CacheEntryUpdatedListener;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.cache.CacheEntryEventSerializableFilter;
+import org.apache.ignite.cache.query.ContinuousQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class IgniteCacheContinuousQueryNoUnsubscribeTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    private static AtomicInteger cntr = new AtomicInteger();
+
+    /** */
+    private boolean client;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(ipFinder);
+
+        cfg.setPeerClassLoadingEnabled(false);
+        cfg.setClientMode(client);
+
+        CacheConfiguration ccfg = new CacheConfiguration();
+
+        cfg.setCacheConfiguration(ccfg);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        startGridsMultiThreaded(3);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        super.afterTest();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoUnsubscribe() throws Exception {
+       checkNoUnsubscribe(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoUnsubscribeClient() throws Exception {
+        checkNoUnsubscribe(true);
+    }
+
+    /**
+     * @param client Client node flag.
+     * @throws Exception If failed.
+     */
+    private void checkNoUnsubscribe(boolean client) throws Exception {
+        cntr.set(0);
+
+        this.client = client;
+
+        try (Ignite ignite = startGrid(3)) {
+            ContinuousQuery qry = new ContinuousQuery();
+
+            qry.setLocalListener(new CacheEntryUpdatedListener() {
+                @Override public void onUpdated(Iterable evts) {
+                    // No-op.
+                }
+            });
+
+            qry.setRemoteFilterFactory(FactoryBuilder.factoryOf(CacheTestRemoteFilter.class));
+
+            qry.setAutoUnsubscribe(false);
+
+            ignite.cache(null).query(qry);
+
+            ignite.cache(null).put(1, 1);
+
+            assertEquals(1, cntr.get());
+        }
+
+        this.client = false;
+
+        try (Ignite newSrv = startGrid(3)) {
+            Integer key = primaryKey(newSrv.cache(null));
+
+            newSrv.cache(null).put(key, 1);
+
+            assertEquals(2, cntr.get());
+
+            for (int i = 0; i < 10; i++)
+                ignite(0).cache(null).put(i, 1);
+
+            assertEquals(12, cntr.get());
+        }
+
+        for (int i = 10; i < 20; i++)
+            ignite(0).cache(null).put(i, 1);
+
+        assertEquals(22, cntr.get());
+    }
+
+    /**
+     *
+     */
+    public static class CacheTestRemoteFilter implements CacheEntryEventSerializableFilter<Object, Object> {
+        /** {@inheritDoc} */
+        @Override public boolean evaluate(CacheEntryEvent<?, ?> evt) throws CacheEntryListenerException {
+            cntr.incrementAndGet();
+
+            return true;
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsIgniteMock.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsIgniteMock.java
index c9f77cd..1b779c2 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsIgniteMock.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsIgniteMock.java
@@ -291,6 +291,13 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> createCaches(Collection<CacheConfiguration> cacheCfgs) {
+        throwUnsupported();
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createCache(String cacheName) {
         throwUnsupported();
 
@@ -312,6 +319,13 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> getOrCreateCaches(Collection<CacheConfiguration> cacheCfgs) {
+        throwUnsupported();
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> void addCacheConfiguration(CacheConfiguration<K, V> cacheCfg) {
         throwUnsupported();
     }
@@ -354,6 +368,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public void destroyCaches(Collection<String> cacheNames) {
+        throwUnsupported();
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> cache(@Nullable String name) {
         throwUnsupported();
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/rest/handlers/query/GridQueryCommandHandlerTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/rest/handlers/query/GridQueryCommandHandlerTest.java
new file mode 100644
index 0000000..7e4cd82
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/rest/handlers/query/GridQueryCommandHandlerTest.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.rest.handlers.query;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.ConnectorConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.rest.GridRestCommand;
+import org.apache.ignite.internal.processors.rest.GridRestResponse;
+import org.apache.ignite.internal.processors.rest.request.RestQueryRequest;
+import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor;
+import org.apache.ignite.testframework.junits.GridTestKernalContext;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import java.util.Collection;
+
+/**
+ * REST query command handler tests.
+ */
+public class GridQueryCommandHandlerTest extends GridCommonAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrid();
+
+        ConnectorConfiguration connCfg = new ConnectorConfiguration();
+
+        connCfg.setIdleQueryCursorCheckFrequency(1000);
+        connCfg.setIdleQueryCursorTimeout(1000);
+
+        grid().configuration().setConnectorConfiguration(connCfg);
+
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        stopAllGrids();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSupportedCommands() throws Exception {
+        GridTestKernalContext ctx = newContext(grid().configuration());
+
+        ctx.add(new GridTimeoutProcessor(ctx));
+
+        QueryCommandHandler cmdHnd = new QueryCommandHandler(ctx);
+
+        Collection<GridRestCommand> commands = cmdHnd.supportedCommands();
+
+        assertEquals(5, commands.size());
+
+        assertTrue(commands.contains(GridRestCommand.EXECUTE_SQL_QUERY));
+        assertTrue(commands.contains(GridRestCommand.EXECUTE_SQL_FIELDS_QUERY));
+        assertTrue(commands.contains(GridRestCommand.EXECUTE_SCAN_QUERY));
+        assertTrue(commands.contains(GridRestCommand.FETCH_SQL_QUERY));
+        assertTrue(commands.contains(GridRestCommand.CLOSE_SQL_QUERY));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUnsupportedCommands() throws Exception {
+        GridTestKernalContext ctx = newContext(grid().configuration());
+
+        ctx.add(new GridTimeoutProcessor(ctx));
+
+        QueryCommandHandler cmdHnd = new QueryCommandHandler(ctx);
+
+        Collection<GridRestCommand> commands = cmdHnd.supportedCommands();
+
+        assertFalse(commands.contains(GridRestCommand.LOG));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNullCache() throws Exception {
+        QueryCommandHandler cmdHnd = new QueryCommandHandler(grid().context());
+
+        Integer arg1 = 1000;
+
+        Object[] arr = new Object[] {arg1, arg1};
+
+        RestQueryRequest req = new RestQueryRequest();
+
+        req.command(GridRestCommand.EXECUTE_SQL_QUERY);
+        req.queryType(RestQueryRequest.QueryType.SCAN);
+        req.typeName(Integer.class.getName());
+        req.pageSize(10);
+        req.sqlQuery("salary+>+%3F+and+salary+<%3D+%3F");
+        req.arguments(arr);
+        req.cacheName(null);
+
+        IgniteInternalFuture<GridRestResponse> resp = cmdHnd.handleAsync(req);
+        resp.get();
+
+        assertEquals("Failed to find cache with name: null", resp.result().getError());
+        assertEquals(GridRestResponse.STATUS_FAILED, resp.result().getSuccessStatus());
+        assertNull(resp.result().getResponse());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNullPageSize() throws Exception {
+        grid().getOrCreateCache(getName());
+
+        QueryCommandHandler cmdHnd = new QueryCommandHandler(grid().context());
+
+        Integer arg1 = 1000;
+
+        Object[] arr = new Object[] {arg1, arg1};
+
+        RestQueryRequest req = new RestQueryRequest();
+
+        req.command(GridRestCommand.EXECUTE_SQL_QUERY);
+        req.queryType(RestQueryRequest.QueryType.SCAN);
+        req.typeName(Integer.class.getName());
+
+        req.pageSize(null);
+        req.sqlQuery("salary+>+%3F+and+salary+<%3D+%3F");
+
+        req.arguments(arr);
+        req.cacheName(getName());
+
+        try {
+            IgniteInternalFuture<GridRestResponse> resp = cmdHnd.handleAsync(req);
+            resp.get();
+
+            fail("Expected exception not thrown.");
+        }
+        catch (IgniteCheckedException e) {
+            info("Got expected exception: " + e);
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQuery() throws Exception {
+        grid().getOrCreateCache(getName());
+
+        QueryCommandHandler cmdHnd = new QueryCommandHandler(grid().context());
+
+        Integer arg1 = 1000;
+
+        Object[] arr = new Object[] {arg1, arg1};
+
+        RestQueryRequest req = new RestQueryRequest();
+
+        req.command(GridRestCommand.EXECUTE_SQL_QUERY);
+        req.queryType(RestQueryRequest.QueryType.SCAN);
+        req.typeName(Integer.class.getName());
+        req.pageSize(null);
+        req.sqlQuery("salary+>+%3F+and+salary+<%3D+%3F");
+        req.arguments(arr);
+        req.cacheName(getName());
+        req.pageSize(10);
+
+        IgniteInternalFuture<GridRestResponse> resp = cmdHnd.handleAsync(req);
+        resp.get();
+
+        assertNull(resp.result().getError());
+        assertEquals(GridRestResponse.STATUS_SUCCESS, resp.result().getSuccessStatus());
+        assertNotNull(resp.result().getResponse());
+
+        CacheQueryResult res = (CacheQueryResult) resp.result().getResponse();
+
+        assertTrue(res.getLast());
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/marshaller/MarshallerContextSelfTest.java b/modules/core/src/test/java/org/apache/ignite/marshaller/MarshallerContextSelfTest.java
new file mode 100644
index 0000000..8a0ff9a
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/marshaller/MarshallerContextSelfTest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.marshaller;
+
+import java.io.File;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import javax.cache.event.EventType;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.internal.IgniteKernal;
+import org.apache.ignite.internal.MarshallerContextImpl;
+import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
+import org.apache.ignite.internal.processors.cache.query.continuous.CacheContinuousQueryManager;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static java.nio.file.Files.readAllBytes;
+
+/**
+ * Test marshaller context.
+ */
+public class MarshallerContextSelfTest extends GridCommonAbstractTest {
+    /**
+     * @throws Exception If failed.
+     */
+    public void testClassName() throws Exception {
+        File workDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "marshaller", false);
+
+        final MarshallerContextImpl.ContinuousQueryListener queryListener =
+                new MarshallerContextImpl.ContinuousQueryListener(log, workDir);
+
+        final ArrayList evts = new ArrayList<>();
+
+        IgniteCacheProxy cache = new IgniteCacheProxy();
+
+        evts.add(new CacheContinuousQueryManager.CacheEntryEventImpl(cache,
+            EventType.CREATED,
+            1,
+            String.class.getName()));
+
+        queryListener.onUpdated(evts);
+
+        try (Ignite g1 = startGrid(1)) {
+            MarshallerContextImpl marshCtx = ((IgniteKernal)g1).context().marshallerContext();
+            String clsName = marshCtx.className(1);
+
+            assertEquals("java.lang.String", clsName);
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOnUpdated() throws Exception {
+        File workDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "marshaller", false);
+
+        final MarshallerContextImpl.ContinuousQueryListener queryListener =
+                new MarshallerContextImpl.ContinuousQueryListener(log, workDir);
+
+        final ArrayList evts = new ArrayList<>();
+
+        IgniteCacheProxy cache = new IgniteCacheProxy();
+
+        evts.add(new CacheContinuousQueryManager.CacheEntryEventImpl(cache,
+            EventType.CREATED,
+            1,
+            String.class.getName()));
+
+        queryListener.onUpdated(evts);
+
+        String fileName = "1.classname";
+
+        assertEquals("java.lang.String", new String(readAllBytes(Paths.get(workDir + "/" + fileName))));
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java
index b3ce46b..0ae6575 100644
--- a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java
@@ -1061,7 +1061,7 @@
                     Collection<ClusterNode> nodes = top.nodes(p, AffinityTopologyVersion.NONE);
 
                     if (nodes.size() > backups + 1) {
-                        LT.warn(log, null, "Partition map was not updated yet (will wait) [grid=" + g.name() +
+                        LT.warn(log, "Partition map was not updated yet (will wait) [grid=" + g.name() +
                             ", p=" + p + ", nodes=" + F.nodeIds(nodes) + ']');
 
                         wait = true;
diff --git a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/IntGenerator.java b/modules/core/src/test/java/org/apache/ignite/testframework/assertions/AlwaysAssertion.java
similarity index 68%
copy from modules/cassandra/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
copy to modules/core/src/test/java/org/apache/ignite/testframework/assertions/AlwaysAssertion.java
index a31abee..f786d4d 100644
--- a/modules/cassandra/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/assertions/AlwaysAssertion.java
@@ -15,19 +15,15 @@
  * limitations under the License.
  */
 
-package org.apache.ignite.tests.load;
+package org.apache.ignite.testframework.assertions;
 
-/**
- * Implementation of {@link org.apache.ignite.tests.load.Generator} generating {@link Integer} instance.
- */
-public class IntGenerator implements Generator {
+/** An {@link Assertion} that always passes. */
+public class AlwaysAssertion implements Assertion {
+    /** Singleton instance */
+    public static final Assertion INSTANCE = new AlwaysAssertion();
+
     /** {@inheritDoc} */
-    @Override public Object generate(long i) {
-        long val = i / 10000;
-
-        while (val > Integer.MAX_VALUE)
-            val = val / 2;
-
-        return (int)val;
+    @Override public void test() throws AssertionError {
+        // No-op.
     }
 }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java b/modules/core/src/test/java/org/apache/ignite/testframework/assertions/Assertion.java
similarity index 63%
copy from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
copy to modules/core/src/test/java/org/apache/ignite/testframework/assertions/Assertion.java
index e1fd60c..4799d88 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/assertions/Assertion.java
@@ -15,23 +15,17 @@
  * limitations under the License.
  */
 
-package org.apache.ignite.cache.store.cassandra.datasource;
+package org.apache.ignite.testframework.assertions;
 
 /**
- * Provides credentials for Cassandra (instead of specifying user/password directly in Spring context XML).
+ * An {@link Assertion} is a condition that is expected to be true. Failing that, an implementation should throw an
+ * {@link AssertionError} or specialized subclass containing information about why the assertion failed.
  */
-public interface Credentials {
+public interface Assertion {
     /**
-     * Returns user name
+     * Test that some condition has been satisfied.
      *
-     * @return user name
+     * @throws AssertionError if the condition was not satisfied.
      */
-    public String getUser();
-
-    /**
-     * Returns password
-     *
-     * @return password
-     */
-    public String getPassword();
+    public void test() throws AssertionError;
 }
diff --git a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java b/modules/core/src/test/java/org/apache/ignite/testframework/assertions/package-info.java
similarity index 66%
copy from modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
copy to modules/core/src/test/java/org/apache/ignite/testframework/assertions/package-info.java
index e1fd60c..a35e01b 100644
--- a/modules/cassandra/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/assertions/package-info.java
@@ -15,23 +15,8 @@
  * limitations under the License.
  */
 
-package org.apache.ignite.cache.store.cassandra.datasource;
-
 /**
- * Provides credentials for Cassandra (instead of specifying user/password directly in Spring context XML).
+ * <!-- Package description. -->
+ * Contains interfaces and classes for assertions.
  */
-public interface Credentials {
-    /**
-     * Returns user name
-     *
-     * @return user name
-     */
-    public String getUser();
-
-    /**
-     * Returns password
-     *
-     * @return password
-     */
-    public String getPassword();
-}
+package org.apache.ignite.testframework.assertions;
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java
index 9f507e6..30c7244 100644
--- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java
@@ -1537,10 +1537,18 @@
 
     /**
      * @param gridName Grid name.
+     * @return {@code True} if the name of the grid indicates that it was the first started (on this JVM).
+     */
+    protected boolean isFirstGrid(String gridName) {
+        return "0".equals(gridName.substring(getTestGridName().length()));
+    }
+
+    /**
+     * @param gridName Grid name.
      * @return <code>True</code> if test was run in multi-JVM mode and grid with this name was started at another JVM.
      */
     protected boolean isRemoteJvm(String gridName) {
-        return isMultiJvm() && !"0".equals(gridName.substring(getTestGridName().length()));
+        return isMultiJvm() && !isFirstGrid(gridName);
     }
 
     /**
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java
index b559897..5722fa3 100644
--- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java
@@ -230,6 +230,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> createCaches(Collection<CacheConfiguration> cacheCfgs) {
+        return null;
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> getOrCreateCache(CacheConfiguration<K, V> cacheCfg) {
         return null;
     }
@@ -261,6 +266,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> getOrCreateCaches(Collection<CacheConfiguration> cacheCfgs) {
+        return null;
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createCache(String cacheName) {
         return null;
     }
@@ -276,6 +286,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public void destroyCaches(Collection<String> cacheNames) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
     @Override public IgniteTransactions transactions() {
         return null;
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java
index e475754..e0432a0 100644
--- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java
@@ -486,7 +486,7 @@
 
                                 if (affNodes.size() != owners.size() || !affNodes.containsAll(owners) ||
                                     (waitEvicts && loc != null && loc.state() != GridDhtPartitionState.OWNING)) {
-                                    LT.warn(log(), null, "Waiting for topology map update [" +
+                                    LT.warn(log(), "Waiting for topology map update [" +
                                         "grid=" + g.name() +
                                         ", cache=" + cfg.getName() +
                                         ", cacheId=" + dht.context().cacheId() +
@@ -503,7 +503,7 @@
                                     match = true;
                             }
                             else {
-                                LT.warn(log(), null, "Waiting for topology map update [" +
+                                LT.warn(log(), "Waiting for topology map update [" +
                                     "grid=" + g.name() +
                                     ", cache=" + cfg.getName() +
                                     ", cacheId=" + dht.context().cacheId() +
@@ -569,7 +569,7 @@
                                     }
 
                                     if (entry.getValue() != GridDhtPartitionState.OWNING) {
-                                        LT.warn(log(), null,
+                                        LT.warn(log(),
                                             "Waiting for correct partition state, should be OWNING [state=" +
                                                 entry.getValue() + "]");
 
@@ -1148,7 +1148,7 @@
      * @return Result of closure execution.
      * @throws Exception If failed.
      */
-    protected <T> T doInTransaction(Ignite ignite,
+    protected static <T> T doInTransaction(Ignite ignite,
         TransactionConcurrency concurrency,
         TransactionIsolation isolation,
         Callable<T> clo) throws Exception {
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridRollingRestartAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridRollingRestartAbstractTest.java
new file mode 100644
index 0000000..6a7973c
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridRollingRestartAbstractTest.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.testframework.junits.common;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.testframework.assertions.AlwaysAssertion;
+import org.apache.ignite.testframework.assertions.Assertion;
+import org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+/**
+ * Base class for tests which use a {@link RollingRestartThread} to stop and start
+ * remote grid JVMs for failover testing.
+ */
+public abstract class GridRollingRestartAbstractTest extends GridCommonAbstractTest {
+    /** Thread that shuts down and restarts Grid nodes for this test. */
+    protected RollingRestartThread rollingRestartThread;
+
+    /** Default predicate used to determine if a Grid node should be restarted. */
+    protected final IgnitePredicate<Ignite> dfltRestartCheck = new IgnitePredicate<Ignite>() {
+        @Override public boolean apply(Ignite ignite) {
+            return serverCount() <= ignite.cluster().forServers().nodes().size();
+        }
+    };
+
+    /**
+     * @return The predicate used to determine if a Grid node should be restarted.
+     */
+    public IgnitePredicate<Ignite> getRestartCheck() {
+        return dfltRestartCheck;
+    }
+
+    /**
+     * Return the {@link Assertion} used to assert some condition before a node is
+     * stopped and started. If the assertion fails, the test will fail with that
+     * assertion.
+     *
+     * @return Assertion that will be tested before a node is restarted.
+     */
+    public Assertion getRestartAssertion() {
+        return AlwaysAssertion.INSTANCE;
+    }
+
+    /**
+     * @return The maximum number of times to perform a restart before exiting (&lt;= 0 implies no limit).
+     */
+    public int getMaxRestarts() {
+        return 3;
+    }
+
+    /**
+     * @return The amount of time in milliseconds to wait between node restarts.
+     */
+    public int getRestartInterval() {
+        return 5000;
+    }
+
+    /**
+     * @return The number of server nodes to start.
+     */
+    public abstract int serverCount();
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        if (isFirstGrid(gridName)) {
+            cfg.setClientMode(true);
+
+            assert cfg.getDiscoverySpi() instanceof TcpDiscoverySpi;
+
+            ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setForceServerMode(true);
+        }
+
+        cfg.setCacheConfiguration(getCacheConfiguration());
+
+        return cfg;
+    }
+
+    /**
+     * @return The cache configuration for the test cache.
+     */
+    protected abstract CacheConfiguration<?, ?> getCacheConfiguration();
+
+    /** {@inheritDoc} */
+    @Override protected boolean isMultiJvm() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        // the +1 includes this JVM (the client)
+        startGrids(serverCount() + 1);
+
+        rollingRestartThread = new RollingRestartThread();
+
+        rollingRestartThread.start();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        stopAllGrids();
+
+        rollingRestartThread.shutdown();
+    }
+
+
+    /**
+     * Thread that performs a "rolling restart" of a set of Ignite grid processes.
+     * */
+    protected class RollingRestartThread extends Thread {
+        /** Running flag. */
+        private volatile boolean isRunning;
+
+        /** The total number of restarts performed by this thread. */
+        private volatile int restartTotal;
+
+        /** Index of Ignite grid that was most recently restarted. */
+        private int currRestartGridId;
+
+        /**
+         * Create a new {@link RollingRestartThread} that will stop and start Ignite Grid
+         * processes managed by the given test. The thread will check the given
+         * {@link #getRestartCheck()} predicate every {@link #getRestartInterval()} milliseconds and
+         * when it returns true, will stop and then restart a Java process
+         * via the test class.
+         */
+        public RollingRestartThread() {
+            if (getRestartInterval() < 0)
+                throw new IllegalArgumentException("invalid restart interval: " + getRestartInterval());
+
+            setDaemon(true);
+
+            setName(RollingRestartThread.class.getSimpleName());
+        }
+
+        /**
+         * @return The total number of process restarts performed by this thread.
+         */
+        public int getRestartTotal() {
+            return restartTotal;
+        }
+
+        /**
+         * Stop the rolling restart thread and wait for it to fully exit.
+         *
+         * @throws InterruptedException If the calling thread was interrupted while waiting for
+         * the rolling restart thread to exit.
+         */
+        public synchronized void shutdown() throws InterruptedException {
+            isRunning = false;
+
+            interrupt();
+
+            join();
+        }
+
+        /** {@inheritDoc} */
+        @Override public synchronized void start() {
+            isRunning = true;
+
+            super.start();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            Ignite ignite = grid(0);
+
+            ignite.log().info(getName() + ": started.");
+
+            IgnitePredicate<Ignite> restartCheck = getRestartCheck();
+
+            Assertion restartAssertion = getRestartAssertion();
+
+            while (isRunning) {
+                try {
+                    if (getRestartInterval() > 0)
+                        Thread.sleep(getRestartInterval());
+                    else
+                        Thread.yield();
+
+                    if (restartCheck.apply(ignite)) {
+                        restartAssertion.test();
+
+                        int restartGrid = nextGridToRestart();
+
+                        stopGrid(restartGrid);
+
+                        ignite.log().info(getName() + ": stopped a process.");
+
+                        startGrid(restartGrid);
+
+                        ignite.log().info(getName() + ": started a process.");
+
+                        int restartCnt = ++restartTotal;
+
+                        if (getMaxRestarts() > 0 && restartCnt >= getMaxRestarts())
+                            isRunning = false;
+                    }
+                }
+                catch (RuntimeException e) {
+                    if (isRunning) {
+                        StringWriter sw = new StringWriter();
+
+                        e.printStackTrace(new PrintWriter(sw));
+
+                        ignite.log().info(getName() + ": caught exception: " + sw.toString());
+                    }
+                    else
+                        ignite.log().info(getName() + ": caught exception while exiting: " + e);
+                }
+                catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+
+                    if (isRunning) {
+                        StringWriter sw = new StringWriter();
+
+                        e.printStackTrace(new PrintWriter(sw));
+
+                        ignite.log().info(getName() + ": was interrupted: " + sw.toString());
+                    }
+                    else
+                        ignite.log().info(getName() + ": was interrupted while exiting: " + e);
+
+                    isRunning = false;
+                }
+                catch (AssertionError e) {
+                    StringWriter sw = new StringWriter();
+
+                    e.printStackTrace(new PrintWriter(sw));
+
+                    ignite.log().info(getName() + ": assertion failed: " + sw.toString());
+
+                    isRunning = false;
+                }
+            }
+
+            ignite.log().info(getName() + ": exited.");
+        }
+
+        /**
+         * Return the index of the next Grid to restart.
+         *
+         * @return Index of the next grid to restart.
+         * @see #currRestartGridId
+         * @see GridRollingRestartAbstractTest#grid(int)
+         */
+        protected int nextGridToRestart() {
+            if (currRestartGridId == serverCount())
+                currRestartGridId = 0;
+
+            // Skip grid 0 because this is the "client" - the JVM that
+            // is executing the test.
+            return ++currRestartGridId;
+        }
+
+        /**
+         * Start the Grid at the given index.
+         *
+         * @param idx Index of Grid to start.
+         * @see GridRollingRestartAbstractTest#grid(int)
+         */
+        protected void startGrid(int idx) {
+            try {
+                GridRollingRestartAbstractTest.this.startGrid(idx);
+            }
+            catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        /**
+         * Stop the process for the Grid at the given index.
+         *
+         * @param idx Index of Grid to stop.
+         * @see GridRollingRestartAbstractTest#grid(int)
+         */
+        protected void stopGrid(int idx) {
+            Ignite remote = grid(idx);
+
+            assert remote instanceof IgniteProcessProxy : remote;
+
+            IgniteProcessProxy proc = (IgniteProcessProxy) remote;
+
+            int pid = proc.getProcess().getPid();
+
+            try {
+                grid(0).log().info(String.format("Killing grid id %d with PID %d", idx, pid));
+
+                IgniteProcessProxy.kill(proc.name());
+
+                grid(0).log().info(String.format("Grid id %d with PID %d stopped", idx, pid));
+            }
+            catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/multijvm/IgniteProcessProxy.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/multijvm/IgniteProcessProxy.java
index 2598bc5..9bb5205 100644
--- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/multijvm/IgniteProcessProxy.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/multijvm/IgniteProcessProxy.java
@@ -213,8 +213,10 @@
     }
 
     /**
+     * Gracefully shut down the Grid.
+     *
      * @param gridName Grid name.
-     * @param cancel Cacnel flag.
+     * @param cancel If {@code true} then all jobs currently executing will be cancelled.
      */
     public static void stop(String gridName, boolean cancel) {
         IgniteProcessProxy proxy = gridProxies.get(gridName);
@@ -227,6 +229,26 @@
     }
 
     /**
+     * Forcefully shut down the Grid.
+     *
+     * @param gridName Grid name.
+     */
+    public static void kill(String gridName) {
+        IgniteProcessProxy proxy = gridProxies.get(gridName);
+
+        A.notNull(gridName, "gridName");
+
+        try {
+            proxy.getProcess().kill();
+        }
+        catch (Exception e) {
+            U.error(proxy.log, "Exception while killing " + gridName, e);
+        }
+
+        gridProxies.remove(gridName, proxy);
+    }
+
+    /**
      * @param locNodeId ID of local node the requested grid instance is managing.
      * @return An instance of named grid. This method never returns {@code null}.
      * @throws IgniteIllegalStateException Thrown if grid was not properly initialized or grid instance was stopped or
@@ -426,6 +448,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> createCaches(Collection<CacheConfiguration> cacheCfgs) {
+        throw new UnsupportedOperationException("Operation isn't supported yet.");
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createCache(String cacheName) {
         throw new UnsupportedOperationException("Operation isn't supported yet.");
     }
@@ -441,6 +468,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> getOrCreateCaches(Collection<CacheConfiguration> cacheCfgs) {
+        throw new UnsupportedOperationException("Operation isn't supported yet.");
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> void addCacheConfiguration(CacheConfiguration<K, V> cacheCfg) {
         throw new UnsupportedOperationException("Operation isn't supported yet.");
     }
@@ -477,6 +509,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public void destroyCaches(Collection<String> cacheNames) {
+        throw new UnsupportedOperationException("Operation isn't supported yet.");
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> cache(@Nullable final String name) {
         return new IgniteCacheProcessProxy<>(name, this);
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java
index 62c2eb3..3dca5e1 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java
@@ -32,6 +32,7 @@
 import org.apache.ignite.internal.GridSelfTest;
 import org.apache.ignite.internal.GridStartStopSelfTest;
 import org.apache.ignite.internal.GridStopWithCancelSelfTest;
+import org.apache.ignite.internal.IgniteLocalNodeMapBeforeStartTest;
 import org.apache.ignite.internal.IgniteSlowClientDetectionSelfTest;
 import org.apache.ignite.internal.MarshallerContextLockingSelfTest;
 import org.apache.ignite.internal.processors.affinity.GridAffinityProcessorRendezvousSelfTest;
@@ -50,6 +51,7 @@
 import org.apache.ignite.internal.util.nio.IgniteExceptionInNioWorkerSelfTest;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.marshaller.DynamicProxySerializationMultiJvmSelfTest;
+import org.apache.ignite.marshaller.MarshallerContextSelfTest;
 import org.apache.ignite.messaging.GridMessagingNoPeerClassLoadingSelfTest;
 import org.apache.ignite.messaging.GridMessagingSelfTest;
 import org.apache.ignite.messaging.IgniteMessagingWithClientTest;
@@ -129,7 +131,7 @@
         suite.addTestSuite(GridLocalIgniteSerializationTest.class);
 
         suite.addTestSuite(IgniteExceptionInNioWorkerSelfTest.class);
-
+        suite.addTestSuite(IgniteLocalNodeMapBeforeStartTest.class);
         suite.addTestSuite(OdbcProcessorValidationSelfTest.class);
         suite.addTestSuite(OdbcEscapeSequenceSelfTest.class);
 
@@ -142,6 +144,7 @@
         suite.addTestSuite(NotStringSystemPropertyTest.class);
 
         suite.addTestSuite(MarshallerContextLockingSelfTest.class);
+        suite.addTestSuite(MarshallerContextSelfTest.class);
 
         return suite;
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java
index d62369c..45a49bf 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheDataStructuresSelfTestSuite.java
@@ -24,6 +24,7 @@
 import org.apache.ignite.internal.processors.cache.datastructures.IgniteClientDiscoveryDataStructuresTest;
 import org.apache.ignite.internal.processors.cache.datastructures.IgniteDataStructureUniqueNameTest;
 import org.apache.ignite.internal.processors.cache.datastructures.IgniteDataStructureWithJobTest;
+import org.apache.ignite.internal.processors.cache.datastructures.SemaphoreFailoverSafeReleasePermitsTest;
 import org.apache.ignite.internal.processors.cache.datastructures.local.GridCacheLocalAtomicOffheapSetSelfTest;
 import org.apache.ignite.internal.processors.cache.datastructures.local.GridCacheLocalAtomicQueueApiSelfTest;
 import org.apache.ignite.internal.processors.cache.datastructures.local.GridCacheLocalAtomicSetSelfTest;
@@ -144,6 +145,7 @@
         suite.addTest(new TestSuite(IgnitePartitionedCountDownLatchSelfTest.class));
         suite.addTest(new TestSuite(IgniteDataStructureWithJobTest.class));
         suite.addTest(new TestSuite(IgnitePartitionedSemaphoreSelfTest.class));
+        suite.addTest(new TestSuite(SemaphoreFailoverSafeReleasePermitsTest.class));
         // TODO IGNITE-3141, enabled when fixed.
         // suite.addTest(new TestSuite(IgnitePartitionedLockSelfTest.class));
 
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheFailoverTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheFailoverTestSuite.java
index c9e507d..26cea39 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheFailoverTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheFailoverTestSuite.java
@@ -41,6 +41,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicNearRemoveFailureTest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicPrimaryWriteOrderNearRemoveFailureTest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearRemoveFailureTest;
+import org.apache.ignite.internal.processors.cache.distributed.rebalancing.GridCacheRebalancingPartitionDistributionTest;
 import org.apache.ignite.testframework.GridTestUtils;
 
 /**
@@ -65,6 +66,7 @@
 
         suite.addTestSuite(GridCacheAtomicInvalidPartitionHandlingSelfTest.class);
         suite.addTestSuite(GridCacheAtomicClientInvalidPartitionHandlingSelfTest.class);
+        suite.addTestSuite(GridCacheRebalancingPartitionDistributionTest.class);
 
         GridTestUtils.addTestIfNeeded(suite, GridCacheIncrementTransformTest.class, ignoredTests);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java
index fd13e98..435fcfb 100755
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java
@@ -81,6 +81,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheStopSelfTest;
 import org.apache.ignite.internal.processors.cache.GridCacheStorePutxSelfTest;
 import org.apache.ignite.internal.processors.cache.GridCacheStoreValueBytesSelfTest;
+import org.apache.ignite.internal.processors.cache.GridCacheSwapCleanupTest;
 import org.apache.ignite.internal.processors.cache.GridCacheSwapPreloadSelfTest;
 import org.apache.ignite.internal.processors.cache.GridCacheSwapReloadSelfTest;
 import org.apache.ignite.internal.processors.cache.GridCacheTtlManagerEvictionSelfTest;
@@ -230,6 +231,7 @@
         // Swap tests.
         suite.addTestSuite(GridCacheSwapPreloadSelfTest.class);
         suite.addTestSuite(GridCacheSwapReloadSelfTest.class);
+        suite.addTestSuite(GridCacheSwapCleanupTest.class);
 
         // Common tests.
         suite.addTestSuite(CacheNamesSelfTest.class);
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java
index dc412a9..3fc27de 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java
@@ -72,6 +72,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridCachePartitionedPreloadEventsSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridCachePartitionedTopologyChangeSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridCachePartitionedUnloadEventsSelfTest;
+import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteCachePartitionedBackupNodeFailureRecoveryTest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicNearEvictionEventSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicNearMultiNodeSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicNearReadersSelfTest;
@@ -259,6 +260,7 @@
         suite.addTest(new TestSuite(CacheEnumOperationsSingleNodeTest.class));
         suite.addTest(new TestSuite(CacheEnumOperationsTest.class));
         suite.addTest(new TestSuite(IgniteCacheIncrementTxTest.class));
+        suite.addTest(new TestSuite(IgniteCachePartitionedBackupNodeFailureRecoveryTest.class));
 
         suite.addTest(new TestSuite(IgniteNoCustomEventsOnNodeStart.class));
 
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite5.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite5.java
index 7582f5c..7f0e23c 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite5.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite5.java
@@ -25,6 +25,7 @@
 import org.apache.ignite.internal.processors.cache.IgniteCachePutStackOverflowSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheReadThroughEvictionsVariationsSuite;
 import org.apache.ignite.internal.processors.cache.IgniteCacheStoreCollectionTest;
+import org.apache.ignite.internal.processors.cache.GridCacheOffHeapCleanupTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheLateAffinityAssignmentFairAffinityTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheLateAffinityAssignmentNodeJoinValidationTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheLateAffinityAssignmentTest;
@@ -59,6 +60,8 @@
         suite.addTest(IgniteCacheReadThroughEvictionsVariationsSuite.suite());
         suite.addTestSuite(IgniteCacheTxIteratorSelfTest.class);
 
+        suite.addTestSuite(GridCacheOffHeapCleanupTest.class);
+
         return suite;
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java
index d9cc8c0..2d06f3a 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java
@@ -66,6 +66,7 @@
 import org.apache.ignite.internal.processors.service.IgniteServiceDeploymentClassLoadingDefaultMarshallerTest;
 import org.apache.ignite.internal.processors.service.IgniteServiceDeploymentClassLoadingJdkMarshallerTest;
 import org.apache.ignite.internal.processors.service.IgniteServiceDeploymentClassLoadingOptimizedMarshallerTest;
+import org.apache.ignite.internal.processors.service.IgniteServiceDynamicCachesSelfTest;
 import org.apache.ignite.internal.processors.service.IgniteServiceReassignmentTest;
 import org.apache.ignite.internal.processors.service.ServicePredicateAccessCacheTest;
 import org.apache.ignite.internal.util.GridStartupWithUndefinedIgniteHomeSelfTest;
@@ -141,6 +142,7 @@
         suite.addTestSuite(GridServiceProxyNodeStopSelfTest.class);
         suite.addTestSuite(GridServiceProxyClientReconnectSelfTest.class);
         suite.addTestSuite(IgniteServiceReassignmentTest.class);
+        suite.addTestSuite(IgniteServiceDynamicCachesSelfTest.class);
 
         suite.addTestSuite(IgniteServiceDeploymentClassLoadingDefaultMarshallerTest.class);
         suite.addTestSuite(IgniteServiceDeploymentClassLoadingOptimizedMarshallerTest.class);
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteRestHandlerTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteRestHandlerTestSuite.java
index 42c6752..6263e8b 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteRestHandlerTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteRestHandlerTestSuite.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.internal.processors.rest.handlers.cache.GridCacheAtomicCommandHandlerSelfTest;
 import org.apache.ignite.internal.processors.rest.handlers.cache.GridCacheCommandHandlerSelfTest;
 import org.apache.ignite.internal.processors.rest.handlers.log.GridLogCommandHandlerTest;
+import org.apache.ignite.internal.processors.rest.handlers.query.GridQueryCommandHandlerTest;
 
 /**
  * REST support tests.
@@ -36,6 +37,7 @@
         suite.addTestSuite(GridCacheCommandHandlerSelfTest.class);
         suite.addTestSuite(GridCacheAtomicCommandHandlerSelfTest.class);
         suite.addTestSuite(GridLogCommandHandlerTest.class);
+        suite.addTestSuite(GridQueryCommandHandlerTest.class);
 
         return suite;
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java
index bd7bb96..078b865 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java
@@ -36,6 +36,7 @@
 import org.apache.ignite.spi.discovery.ClusterMetricsSnapshotSerializeSelfTest;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.thread.GridThreadPoolExecutorServiceSelfTest;
+import org.apache.ignite.thread.IgniteThreadPoolSizeTest;
 import org.apache.ignite.util.GridLongListSelfTest;
 import org.apache.ignite.util.GridMessageCollectionTest;
 import org.apache.ignite.util.GridQueueSelfTest;
@@ -64,6 +65,7 @@
         TestSuite suite = new TestSuite("Ignite Util Test Suite");
 
         suite.addTestSuite(GridThreadPoolExecutorServiceSelfTest.class);
+        suite.addTestSuite(IgniteThreadPoolSizeTest.class);
         GridTestUtils.addTestIfNeeded(suite, IgniteUtilsSelfTest.class, ignoredTests);
         suite.addTestSuite(GridSpinReadWriteLockSelfTest.class);
         suite.addTestSuite(GridQueueSelfTest.class);
diff --git a/modules/core/src/test/java/org/apache/ignite/thread/IgniteThreadPoolSizeTest.java b/modules/core/src/test/java/org/apache/ignite/thread/IgniteThreadPoolSizeTest.java
new file mode 100644
index 0000000..d1fd4e7
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/thread/IgniteThreadPoolSizeTest.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.thread;
+
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class IgniteThreadPoolSizeTest extends GridCommonAbstractTest {
+    /** Wrong thread pool size value for testing */
+    private static final int WRONG_VALUE = 0;
+
+    /**
+     * @return Ignite configuration.
+     */
+    private IgniteConfiguration configuration() {
+        return new IgniteConfiguration();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAsyncCallbackPoolSize() throws Exception {
+        testWrongPoolSize(configuration().setAsyncCallbackPoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testIgfsThreadPoolSize() throws Exception {
+        testWrongPoolSize(configuration().setIgfsThreadPoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testManagementThreadPoolSize() throws Exception {
+        testWrongPoolSize(configuration().setManagementThreadPoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPeerClassLoadingThreadPoolSize() throws Exception {
+        testWrongPoolSize(configuration().setPeerClassLoadingThreadPoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPublicThreadPoolSize() throws Exception {
+        testWrongPoolSize(configuration().setPublicThreadPoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRebalanceThreadPoolSize() throws Exception {
+        testWrongPoolSize(configuration().setRebalanceThreadPoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSystemThreadPoolSize() throws Exception {
+        testWrongPoolSize(configuration().setSystemThreadPoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUtilityCachePoolSize() throws Exception {
+        testWrongPoolSize(configuration().setUtilityCachePoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("deprecated")
+    public void testMarshallerCachePoolSize() throws Exception {
+        testWrongPoolSize(configuration().setMarshallerCachePoolSize(WRONG_VALUE));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testConnectorThreadPoolSize() throws Exception {
+        final IgniteConfiguration cfg = configuration();
+
+        cfg.getConnectorConfiguration().setThreadPoolSize(WRONG_VALUE);
+
+        testWrongPoolSize(cfg);
+    }
+
+    /**
+     * Performs testing for wrong thread pool size.
+     *
+     * @param cfg An IgniteConfiguration with exactly one thread pool size set to the invalid WRONG_VALUE.
+     * @throws Exception If failed.
+     */
+    private void testWrongPoolSize(IgniteConfiguration cfg) throws Exception {
+        try {
+            Ignition.start(cfg);
+
+            fail();
+        }
+        catch (IgniteException ex) {
+            assertNotNull(ex.getMessage());
+            assertTrue(ex.getMessage().contains("thread pool size"));
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/util/GridLogThrottleTest.java b/modules/core/src/test/java/org/apache/ignite/util/GridLogThrottleTest.java
index d9540a8..9eac0cc 100644
--- a/modules/core/src/test/java/org/apache/ignite/util/GridLogThrottleTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/util/GridLogThrottleTest.java
@@ -53,26 +53,20 @@
         // LOGGED.
         LT.error(log, new RuntimeException("Test exception 2."), "Test");
 
-        // OMITTED.
-        LT.warn(log, new RuntimeException("Test exception 1."), "Test");
-
-        // OMITTED.
-        LT.warn(log, new RuntimeException("Test exception 2."), "Test1");
-
-        // OMITTED.
-        LT.warn(log, new RuntimeException("Test exception 2."), "Test3");
-
         // LOGGED.
         LT.error(log, null, "Test - without throwable.");
 
         // OMITTED.
         LT.error(log, null, "Test - without throwable.");
 
+        // OMITTED.
+        LT.warn(log, "Test - without throwable.");
+
         // LOGGED.
-        LT.warn(log, null, "Test - without throwable1.");
+        LT.warn(log, "Test - without throwable1.");
 
         // OMITTED.
-        LT.warn(log, null, "Test - without throwable1.");
+        LT.warn(log, "Test - without throwable1.");
 
         Thread.sleep(LT.throttleTimeout());
 
@@ -90,14 +84,11 @@
         // LOGGED.
         LT.error(log, new RuntimeException("Test exception 2."), "Test");
 
-        // OMITTED.
-        LT.warn(log, new RuntimeException("Test exception 1."), "Test");
+        // LOGGED.
+        LT.warn(log, "Test - without throwable.");
 
         // OMITTED.
-        LT.warn(log, new RuntimeException("Test exception 2."), "Test1");
-
-        // OMITTED.
-        LT.warn(log, new RuntimeException("Test exception 2."), "Test3");
+        LT.warn(log, "Test - without throwable.");
 
         Thread.sleep(LT.throttleTimeout());
 
@@ -121,4 +112,4 @@
         //OMMITED.
         LT.info(log(), "Test info message.");
     }
-}
\ No newline at end of file
+}
diff --git a/modules/docker/1.7.0/Dockerfile b/modules/docker/1.7.0/Dockerfile
new file mode 100644
index 0000000..5565df6
--- /dev/null
+++ b/modules/docker/1.7.0/Dockerfile
@@ -0,0 +1,44 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Start from a Java image.
+FROM java:8
+
+# Ignite version
+ENV IGNITE_VERSION 1.7.0
+
+# Ignite home
+ENV IGNITE_HOME /opt/ignite/apache-ignite-fabric-${IGNITE_VERSION}-bin
+
+# Do not rely on anything provided by base image(s), but be explicit, if they are installed already it is noop then
+RUN apt-get update && apt-get install -y --no-install-recommends \
+        unzip \
+        curl \
+    && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /opt/ignite
+
+RUN curl http://www.us.apache.org/dist/ignite/${IGNITE_VERSION}/apache-ignite-fabric-${IGNITE_VERSION}-bin.zip -o ignite.zip \
+    && unzip ignite.zip \
+    && rm ignite.zip
+
+# Copy sh files and set permission
+COPY ./run.sh $IGNITE_HOME/
+
+RUN chmod +x $IGNITE_HOME/run.sh
+
+CMD $IGNITE_HOME/run.sh
\ No newline at end of file
diff --git a/modules/docker/1.7.0/run.sh b/modules/docker/1.7.0/run.sh
new file mode 100644
index 0000000..3aafc30
--- /dev/null
+++ b/modules/docker/1.7.0/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+if [ ! -z "$OPTION_LIBS" ]; then
+  IFS=, LIBS_LIST=("$OPTION_LIBS")
+
+  for lib in ${LIBS_LIST[@]}; do
+    cp -r $IGNITE_HOME/libs/optional/"$lib"/* \
+        $IGNITE_HOME/libs/
+  done
+fi
+
+if [ ! -z "$EXTERNAL_LIBS" ]; then
+  IFS=, LIBS_LIST=("$EXTERNAL_LIBS")
+
+  for lib in ${LIBS_LIST[@]}; do
+    echo $lib >> temp
+  done
+
+  wget -i temp -P $IGNITE_HOME/libs
+
+  rm temp
+fi
+
+QUIET=""
+
+if [ "$IGNITE_QUIET" = "false" ]; then
+  QUIET="-v"
+fi
+
+if [ -z $CONFIG_URI ]; then
+  $IGNITE_HOME/bin/ignite.sh $QUIET
+else
+  $IGNITE_HOME/bin/ignite.sh $QUIET $CONFIG_URI
+fi
+
diff --git a/modules/docker/Dockerfile b/modules/docker/Dockerfile
index 959771b..5565df6 100644
--- a/modules/docker/Dockerfile
+++ b/modules/docker/Dockerfile
@@ -19,7 +19,7 @@
 FROM java:8
 
 # Ignite version
-ENV IGNITE_VERSION 1.6.0
+ENV IGNITE_VERSION 1.7.0
 
 # Ignite home
 ENV IGNITE_HOME /opt/ignite/apache-ignite-fabric-${IGNITE_VERSION}-bin
diff --git a/modules/flink/src/main/java/org/apache/ignite/sink/flink/IgniteSink.java b/modules/flink/src/main/java/org/apache/ignite/sink/flink/IgniteSink.java
index e0ae783..2f18f80 100644
--- a/modules/flink/src/main/java/org/apache/ignite/sink/flink/IgniteSink.java
+++ b/modules/flink/src/main/java/org/apache/ignite/sink/flink/IgniteSink.java
@@ -34,7 +34,7 @@
     private static final long DFLT_FLUSH_FREQ = 10000L;
 
     /** Logger. */
-    private final IgniteLogger log;
+    private final transient IgniteLogger log;
 
     /** Automatic flush frequency. */
     private long autoFlushFrequency = DFLT_FLUSH_FREQ;
diff --git a/modules/flume/README.txt b/modules/flume/README.txt
index bf7e0ff..adcd021 100644
--- a/modules/flume/README.txt
+++ b/modules/flume/README.txt
@@ -2,7 +2,7 @@
 -------------------------------
 
 IgniteSink is a Flume sink that extracts Events from an associated Flume channel and injects into an Ignite cache.
-Flume 1.6.0 is supported.
+Flume 1.7.0 is supported.
 
 IgniteSink, which can be found in 'optional/ignite-flume', and its dependencies have to be included in the agent's classpath,
 as described in the following subsection, before starting the Flume agent.
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
index 6d903d8..bc047e7 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
@@ -856,7 +856,7 @@
             catch (IgniteCheckedException e) {
                 if (e.hasCause(IpcOutOfSystemResourcesException.class))
                     // Has cause or is itself the IpcOutOfSystemResourcesException.
-                    LT.warn(log, null, OUT_OF_RESOURCES_TCP_MSG);
+                    LT.warn(log, OUT_OF_RESOURCES_TCP_MSG);
                 else if (log.isDebugEnabled())
                     log.debug("Failed to establish shared memory connection with local hadoop process: " +
                         desc);
@@ -1059,7 +1059,7 @@
                         ", err=" + e + ']');
 
                 if (X.hasCause(e, SocketTimeoutException.class))
-                    LT.warn(log, null, "Connect timed out (consider increasing 'connTimeout' " +
+                    LT.warn(log, "Connect timed out (consider increasing 'connTimeout' " +
                         "configuration property) [addr=" + addr + ", port=" + port + ']');
 
                 if (errs == null)
@@ -1084,7 +1084,7 @@
             assert errs != null;
 
             if (X.hasCause(errs, ConnectException.class))
-                LT.warn(log, null, "Failed to connect to a remote Hadoop process (is process still running?). " +
+                LT.warn(log, "Failed to connect to a remote Hadoop process (is process still running?). " +
                     "Make sure operating system firewall is disabled on local and remote host) " +
                     "[addrs=" + addr + ", port=" + port + ']');
 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/GridH2ResultSetIterator.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/GridH2ResultSetIterator.java
index 3603bb5..e0680d3 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/GridH2ResultSetIterator.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/GridH2ResultSetIterator.java
@@ -17,24 +17,49 @@
 
 package org.apache.ignite.internal.processors.query.h2;
 
+import java.lang.reflect.Field;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.NoSuchElementException;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2ValueCacheObject;
 import org.apache.ignite.internal.util.GridCloseableIteratorAdapter;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
-
+import org.h2.jdbc.JdbcResultSet;
+import org.h2.result.ResultInterface;
+import org.h2.value.Value;
 
 /**
  * Iterator over result set.
  */
 public abstract class GridH2ResultSetIterator<T> extends GridCloseableIteratorAdapter<T> {
     /** */
+    private static final Field RESULT_FIELD;
+
+    /**
+     * Initialize.
+     */
+    static {
+        try {
+            RESULT_FIELD = JdbcResultSet.class.getDeclaredField("result");
+
+            RESULT_FIELD.setAccessible(true);
+        }
+        catch (NoSuchFieldException e) {
+            throw new IllegalStateException("Check H2 version in classpath.", e);
+        }
+    }
+
+    /** */
     private static final long serialVersionUID = 0L;
 
     /** */
+    private final ResultInterface res;
+
+    /** */
     private final ResultSet data;
 
     /** */
@@ -49,12 +74,20 @@
     /**
      * @param data Data array.
      * @param closeStmt If {@code true} closes result set statement when iterator is closed.
+     * @param needCpy {@code True} if need copy cache object's value.
      * @throws IgniteCheckedException If failed.
      */
-    protected GridH2ResultSetIterator(ResultSet data, boolean closeStmt) throws IgniteCheckedException {
+    protected GridH2ResultSetIterator(ResultSet data, boolean closeStmt, boolean needCpy) throws IgniteCheckedException {
         this.data = data;
         this.closeStmt = closeStmt;
 
+        try {
+            res = needCpy ? (ResultInterface)RESULT_FIELD.get(data) : null;
+        }
+        catch (IllegalAccessException e) {
+            throw new IllegalStateException(e); // Must not happen.
+        }
+
         if (data != null) {
             try {
                 row = new Object[data.getMetaData().getColumnCount()];
@@ -78,8 +111,27 @@
             if (!data.next())
                 return false;
 
-            for (int c = 0; c < row.length; c++)
-                row[c] = data.getObject(c + 1);
+            if (res != null) {
+                Value[] values = res.currentRow();
+
+                for (int c = 0; c < row.length; c++) {
+                    Value val = values[c];
+
+                    if (val instanceof GridH2ValueCacheObject) {
+                        GridH2ValueCacheObject valCacheObj = (GridH2ValueCacheObject)values[c];
+
+                        GridCacheContext cctx = valCacheObj.getCacheContext();
+
+                        row[c] = valCacheObj.getObject(cctx != null && cctx.needValueCopy());
+                    }
+                    else
+                        row[c] = val.getObject();
+                }
+            }
+            else {
+                for (int c = 0; c < row.length; c++)
+                    row[c] = data.getObject(c + 1);
+            }
 
             return true;
         }
@@ -134,6 +186,6 @@
 
     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString((Class<GridH2ResultSetIterator>)getClass(), this);
+        return S.toString(GridH2ResultSetIterator.class, this);
     }
 }
\ No newline at end of file
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
index ed42bc6..6da8758 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
@@ -996,7 +996,7 @@
                 String longMsg = "Query execution is too long [time=" + time + " ms, sql='" + sql + '\'' +
                     ", plan=" + U.nl() + plan.getString(1) + U.nl() + ", parameters=" + params + "]";
 
-                LT.warn(log, null, longMsg, msg);
+                LT.warn(log, longMsg, msg);
             }
 
             return rs;
@@ -2566,7 +2566,7 @@
          * @throws IgniteCheckedException If failed.
          */
         protected FieldsIterator(ResultSet data) throws IgniteCheckedException {
-            super(data, false);
+            super(data, false, true);
         }
 
         /** {@inheritDoc} */
@@ -2591,7 +2591,7 @@
          * @throws IgniteCheckedException If failed.
          */
         protected KeyValIterator(ResultSet data) throws IgniteCheckedException {
-            super(data, false);
+            super(data, false, true);
         }
 
         /** {@inheritDoc} */
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java
index 9486a2e..8e7b161 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java
@@ -98,7 +98,7 @@
 
     /** {@inheritDoc} */
     @Override public void setKey(long key) {
-        throw new UnsupportedOperationException();
+        // No-op, may be set in H2 INFORMATION_SCHEMA.
     }
 
     /** {@inheritDoc} */
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2ValueCacheObject.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2ValueCacheObject.java
index 92bbabb..a2f5c88 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2ValueCacheObject.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2ValueCacheObject.java
@@ -121,7 +121,15 @@
 
     /** {@inheritDoc} */
     @Override public Object getObject() {
-        return obj.isPlatformType() ? obj.value(objectContext(), false) : obj;
+        return getObject(false);
+    }
+
+    /**
+     * @param cpy Copy flag.
+     * @return Value.
+     */
+    public Object getObject(boolean cpy) {
+        return obj.isPlatformType() ? obj.value(objectContext(), cpy) : obj;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java
index 0314b3d..ac1a6a6 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java
@@ -59,6 +59,7 @@
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2RetryException;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2ValueCacheObject;
 import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest;
 import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryFailResponse;
 import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageRequest;
@@ -936,6 +937,9 @@
         private final int rowCnt;
 
         /** */
+        private boolean cpNeeded;
+
+        /** */
         private volatile boolean closed;
 
         /**
@@ -944,11 +948,12 @@
          * @param qrySrcNodeId Query source node.
          * @param qry Query.
          */
-        private QueryResult(ResultSet rs, GridCacheContext<?,?> cctx, UUID qrySrcNodeId, GridCacheSqlQuery qry) {
+        private QueryResult(ResultSet rs, GridCacheContext<?, ?> cctx, UUID qrySrcNodeId, GridCacheSqlQuery qry) {
             this.rs = rs;
             this.cctx = cctx;
             this.qry = qry;
             this.qrySrcNodeId = qrySrcNodeId;
+            this.cpNeeded = cctx.isLocalNode(qrySrcNodeId);
 
             try {
                 res = (ResultInterface)RESULT_FIELD.get(rs);
@@ -980,6 +985,33 @@
 
                 Value[] row = res.currentRow();
 
+                if (cpNeeded) {
+                    boolean copied = false;
+
+                    for (int j = 0; j < row.length; j++) {
+                        Value val = row[j];
+
+                        if (val instanceof GridH2ValueCacheObject) {
+                            GridH2ValueCacheObject valCacheObj = (GridH2ValueCacheObject)val;
+
+                            GridCacheContext cctx = valCacheObj.getCacheContext();
+
+                            if (cctx != null && cctx.needValueCopy()) {
+                                row[j] = new GridH2ValueCacheObject(valCacheObj.getCacheContext(), valCacheObj.getCacheObject()) {
+                                    @Override public Object getObject() {
+                                        return getObject(true);
+                                    }
+                                };
+
+                                copied = true;
+                            }
+                        }
+                    }
+
+                    if (i == 0 && !copied)
+                        cpNeeded = false; // No copy on read caches, skip next checks.
+                }
+
                 assert row != null;
 
                 if (readEvt) {
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java
index 3847373..48567da 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java
@@ -756,7 +756,7 @@
             }
             finally {
                 // Make sure any activity related to current attempt is cancelled.
-                cancelRemoteQueriesIfNeeded(nodes, r, qryReqId);
+                cancelRemoteQueriesIfNeeded(nodes, r, qryReqId, qry.distributedJoins());
 
                 if (!runs.remove(qryReqId, r))
                     U.warn(log, "Query run was already removed: " + qryReqId);
@@ -793,15 +793,26 @@
     }
 
     /**
+     * @param nodes Query nodes.
      * @param r Query run.
      * @param qryReqId Query id.
+     * @param distributedJoins Distributed join flag.
      */
-    private void cancelRemoteQueriesIfNeeded(Collection<ClusterNode> nodes, QueryRun r, long qryReqId) {
-        for (GridMergeIndex idx : r.idxs) {
-            if (!idx.fetchedAll()) {
-                send(nodes, new GridQueryCancelRequest(qryReqId), null, false);
+    private void cancelRemoteQueriesIfNeeded(Collection<ClusterNode> nodes,
+        QueryRun r,
+        long qryReqId,
+        boolean distributedJoins)
+    {
+        // For distributed joins we always need to send a cancel request to clean up resources.
+        if (distributedJoins)
+            send(nodes, new GridQueryCancelRequest(qryReqId), null, false);
+        else {
+            for (GridMergeIndex idx : r.idxs) {
+                if (!idx.fetchedAll()) {
+                    send(nodes, new GridQueryCancelRequest(qryReqId), null, false);
 
-                break;
+                    break;
+                }
             }
         }
     }
@@ -1375,7 +1386,7 @@
          * @throws IgniteCheckedException If failed.
          */
         protected Iter(ResultSet data) throws IgniteCheckedException {
-            super(data, true);
+            super(data, true, false);
         }
 
         /** {@inheritDoc} */
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/CacheSqlQueryValueCopySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/CacheSqlQueryValueCopySelfTest.java
new file mode 100644
index 0000000..e47e893
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/CacheSqlQueryValueCopySelfTest.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import java.util.List;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.SqlQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Tests modification of values returned by query iterators with enabled copy on read.
+ */
+public class CacheSqlQueryValueCopySelfTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    private static final int KEYS = 100;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        if ("client".equals(cfg.getGridName()))
+            cfg.setClientMode(true);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(ipFinder);
+
+        CacheConfiguration<Integer, Value> cc = new CacheConfiguration<>();
+
+        cc.setCopyOnRead(true);
+        cc.setIndexedTypes(Integer.class, Value.class);
+
+        cfg.setCacheConfiguration(cc);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGridsMultiThreaded(3);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        IgniteCache<Integer, Value> cache = grid(0).cache(null);
+
+        for (int i = 0; i < KEYS; i++)
+            cache.put(i, new Value("before"));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        IgniteCache<Integer, Value> cache = grid(0).cache(null);
+
+        cache.removeAll();
+
+        super.afterTest();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        stopAllGrids();
+    }
+
+    /**
+     * Tests two step query from dedicated client.
+     *
+     * @throws Exception If failed.
+     */
+    public void testTwoStepSqlClientQuery() throws Exception {
+        try (Ignite client = startGrid("client")) {
+            IgniteCache<Integer, Value> cache = client.cache(null);
+
+            List<Cache.Entry<Integer, Value>> all = cache.query(
+                new SqlQuery<Integer, Value>(Value.class, "select * from Value")).getAll();
+
+            assertEquals(KEYS, all.size());
+
+            for (Cache.Entry<Integer, Value> entry : all)
+                entry.getValue().str = "after";
+
+            check(cache);
+
+            QueryCursor<List<?>> qry = cache.query(new SqlFieldsQuery("select _val from Value"));
+
+            List<List<?>> all0 = qry.getAll();
+
+            assertEquals(KEYS, all0.size());
+
+            for (List<?> entry : all0)
+                ((Value)entry.get(0)).str = "after";
+
+            check(cache);
+        }
+    }
+
+    /**
+     * Test two step query without local reduce phase.
+     */
+    public void testTwoStepSkipReduceSqlQuery() {
+        IgniteCache<Integer, Value> cache = grid(0).cache(null);
+
+        List<Cache.Entry<Integer, Value>> all = cache.query(
+            new SqlQuery<Integer, Value>(Value.class, "select * from Value").setPageSize(3)).getAll();
+
+        assertEquals(KEYS, all.size());
+
+        for (Cache.Entry<Integer, Value> entry : all)
+            entry.getValue().str = "after";
+
+        check(cache);
+    }
+
+    /**
+     * Test two step query value copy.
+     */
+    public void testTwoStepReduceSqlQuery() {
+        IgniteCache<Integer, Value> cache = grid(0).cache(null);
+
+        QueryCursor<List<?>> qry = cache.query(new SqlFieldsQuery("select _val from Value order by _key"));
+
+        List<List<?>> all = qry.getAll();
+
+        assertEquals(KEYS, all.size());
+
+        for (List<?> entry : all)
+            ((Value)entry.get(0)).str = "after";
+
+        check(cache);
+    }
+
+    /**
+     * Tests local sql query.
+     */
+    public void testLocalSqlQuery() {
+        IgniteCache<Integer, Value> cache = grid(0).cache(null);
+
+        SqlQuery<Integer, Value> qry = new SqlQuery<>(Value.class.getSimpleName(), "select * from Value");
+        qry.setLocal(true);
+
+        List<Cache.Entry<Integer, Value>> all = cache.query(qry).getAll();
+
+        assertFalse(all.isEmpty());
+
+        for (Cache.Entry<Integer, Value> entry : all)
+            entry.getValue().str = "after";
+
+        check(cache);
+    }
+
+    /**
+     * Tests local sql query.
+     */
+    public void testLocalSqlFieldsQuery() {
+        IgniteCache<Integer, Value> cache = grid(0).cache(null);
+
+        QueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery("select _val from Value").setLocal(true));
+
+        List<List<?>> all = cur.getAll();
+
+        assertFalse(all.isEmpty());
+
+        for (List<?> entry : all)
+            ((Value)entry.get(0)).str = "after";
+
+        check(cache);
+    }
+
+    /** */
+    private static class Value {
+        /** */
+        private String str;
+
+        /**
+         * @param str String.
+         */
+        public Value(String str) {
+            this.str = str;
+        }
+    }
+
+    /**
+     * @param cache Cache.
+     */
+    private void check(IgniteCache<Integer, Value> cache) {
+        int cnt = 0;
+
+        // Value should not be modified by the previous assignment.
+        for (Cache.Entry<Integer, Value> entry : cache) {
+            cnt++;
+
+            assertEquals("before", entry.getValue().str);
+        }
+
+        assertEquals(KEYS, cnt);
+    }
+}
\ No newline at end of file
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractFieldsQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractFieldsQuerySelfTest.java
index 926d294..d5f02eb 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractFieldsQuerySelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractFieldsQuerySelfTest.java
@@ -71,7 +71,7 @@
     private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
 
     /** Cache name. */
-    private static final String CACHE = "cache";
+    protected static final String CACHE = "cache";
 
     /** Empty cache name. */
     private static final String EMPTY_CACHE = "emptyCache";
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapEvictQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapEvictQueryTest.java
index f21a279..aecbb03 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapEvictQueryTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapEvictQueryTest.java
@@ -174,7 +174,7 @@
                             }
                         }
 
-                        LT.warn(log, null, e.getMessage());
+                        LT.warn(log, e.getMessage());
 
                         return;
                     }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheQueriesLoadTest1.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheQueriesLoadTest1.java
new file mode 100644
index 0000000..d16fe99
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheQueriesLoadTest1.java
@@ -0,0 +1,604 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ThreadLocalRandom;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteDataStreamer;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.binary.BinaryObject;
+import org.apache.ignite.cache.QueryEntity;
+import org.apache.ignite.cache.QueryIndex;
+import org.apache.ignite.cache.QueryIndexType;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.lang.IgniteCallable;
+import org.apache.ignite.lang.IgniteRunnable;
+import org.apache.ignite.resources.IgniteInstanceResource;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMemoryMode.OFFHEAP_TIERED;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ *
+ */
+@SuppressWarnings("unchecked")
+public class IgniteCacheQueriesLoadTest1 extends GridCommonAbstractTest {
+    /** Operation. */
+    private static final String OPERATION = "Operation";
+
+    /** Deposit. */
+    private static final String DEPOSIT = "Deposit";
+
+    /** Trader. */
+    private static final String TRADER = "Trader";
+
+    /** Id. */
+    private static final String ID = "ID";
+
+    /** Deposit id. */
+    private static final String DEPOSIT_ID = "DEPOSIT_ID";
+
+    /** Trader id. */
+    private static final String TRADER_ID = "TRADER_ID";
+
+    /** Firstname. */
+    private static final String FIRSTNAME = "FIRSTNAME";
+
+    /** Secondname. */
+    private static final String SECONDNAME = "SECONDNAME";
+
+    /** Email. */
+    private static final String EMAIL = "EMAIL";
+
+    /** Business day. */
+    private static final String BUSINESS_DAY = "BUSINESS_DAY";
+
+    /** Trader link. */
+    private static final String TRADER_LINK = "TRADER";
+
+    /** Balance. */
+    private static final String BALANCE = "BALANCE";
+
+    /** Margin rate. */
+    private static final String MARGIN_RATE = "MARGIN_RATE";
+
+    /** Balance on day open. */
+    private static final String BALANCE_ON_DAY_OPEN = "BALANCEDO";
+
+    /** Trader cache name. */
+    private static final String TRADER_CACHE = "TRADER_CACHE";
+
+    /** Deposit cache name. */
+    private static final String DEPOSIT_CACHE = "DEPOSIT_CACHE";
+
+    /** History of operation over deposit. */
+    private static final String DEPOSIT_HISTORY_CACHE = "DEPOSIT_HISTORY_CACHE";
+
+    /** Count of operations by deposit. */
+    private static final String DEPOSIT_OPERATION_COUNT_SQL = "SELECT COUNT(*) FROM \"" + DEPOSIT_HISTORY_CACHE
+        + "\"." + OPERATION + " WHERE " + "DEPOSIT_ID" + "=?";
+
+    /** Get last history row. */
+    private static final String LAST_HISTORY_ROW_SQL = "SELECT MAX("+BUSINESS_DAY+") FROM \""+DEPOSIT_HISTORY_CACHE
+        + "\"." + OPERATION + " WHERE " + "DEPOSIT_ID" + "=?";
+
+    /** Find deposit SQL query. */
+    private static final String FIND_DEPOSIT_SQL = "SELECT _key FROM \"" + DEPOSIT_CACHE + "\"." + DEPOSIT
+        + " WHERE " + TRADER_ID + "=?";
+
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    private static final int NODES = 5;
+
+    /** Distribution of partitions by nodes. */
+    private Map<UUID, List<Integer>> partitionsMap;
+
+    /** Preload amount. */
+    private final int preloadAmount = 10_000;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setIncludeEventTypes();
+
+        cfg.setMarshaller(null);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
+
+        RendezvousAffinityFunction aff = new RendezvousAffinityFunction();
+        aff.setPartitions(3000);
+
+        CacheConfiguration<Object, Object> parentCfg = new CacheConfiguration<>();
+        parentCfg.setAffinity(aff);
+        parentCfg.setAtomicityMode(TRANSACTIONAL);
+        parentCfg.setCacheMode(PARTITIONED);
+        parentCfg.setMemoryMode(OFFHEAP_TIERED);
+        parentCfg.setBackups(2);
+        parentCfg.setWriteSynchronizationMode(FULL_SYNC);
+
+        cfg.setCacheConfiguration(
+            getTraderCfg(parentCfg),
+            getDepositCfg(parentCfg),
+            getDepositHistoryCfg(parentCfg)
+        );
+
+        return cfg;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueries() throws Exception {
+        runQueries(1, true, 10_000);
+
+        runQueries(10, false, 30_000);
+    }
+
+    /**
+     * @param threads Threads number.
+     * @param checkBalance Check balance flag.
+     * @param time Execution time.
+     * @throws Exception If failed.
+     */
+    private void runQueries(int threads, final boolean checkBalance, final long time) throws Exception {
+        final Ignite ignite = grid(0);
+
+        GridTestUtils.runMultiThreaded(new Callable<Object>() {
+            @Override public Object call() {
+                long endTime = System.currentTimeMillis() + time;
+
+                while (System.currentTimeMillis() < endTime) {
+                    ScanQueryBroadcastClosure c = new ScanQueryBroadcastClosure(partitionsMap, checkBalance);
+
+                    ignite.compute().broadcast(c);
+                }
+
+                return null;
+            }
+        }, threads, "test-thread");
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startGridsMultiThreaded(NODES);
+
+        partitionsMap = traderCachePartitions(ignite(0));
+
+        assertEquals(NODES, partitionsMap.size());
+
+        preLoading();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        assert G.allGrids().isEmpty();
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    private void preLoading() throws Exception {
+        final Thread preloadAccount = new Thread() {
+            @Override public void run() {
+                setName("preloadTraders");
+
+                Ignite ignite = ignite(0);
+
+                try (IgniteDataStreamer dataLdr = ignite.dataStreamer(TRADER_CACHE)) {
+                    for (int i = 0; i < preloadAmount && !isInterrupted(); i++) {
+                        String traderKey = "traderId=" + i;
+
+                        dataLdr.addData(traderKey, createTrader(ignite, traderKey));
+                    }
+                }
+            }
+        };
+
+        preloadAccount.start();
+
+        Thread preloadTrade = new Thread() {
+            @Override public void run() {
+                setName("preloadDeposits");
+
+                Ignite ignite = ignite(0);
+
+                try (IgniteDataStreamer dataLdr = ignite.dataStreamer(DEPOSIT_CACHE)) {
+                    for (int i = 0; i < preloadAmount && !isInterrupted(); i++) {
+                        int traderId = nextRandom(preloadAmount);
+
+                        String traderKey = "traderId=" + traderId;
+                        String key = traderKey + "&depositId=" + i;
+
+                        dataLdr.addData(key, createDeposit(ignite, key, traderKey, i));
+                    }
+                }
+            }
+        };
+
+        preloadTrade.start();
+
+        preloadTrade.join();
+        preloadAccount.join();
+    }
+
+    /**
+     * @param ignite Node.
+     * @param id Identifier.
+     * @return Trader entity as binary object.
+     */
+    private BinaryObject createTrader(Ignite ignite, String id) {
+        return ignite.binary()
+            .builder(TRADER)
+            .setField(ID, id)
+            .setField(FIRSTNAME, "First name " + id)
+            .setField(SECONDNAME, "Second name " + id)
+            .setField(EMAIL, "trader" + id + "@mail.org")
+            .build();
+    }
+
+    /**
+     * @param ignite Node.
+     * @param id Identifier.
+     * @param traderId Key.
+     * @param num Num.
+     * @return Deposit entity as binary object.
+     */
+    private BinaryObject createDeposit(Ignite ignite, String id, String traderId, int num) {
+        double startBalance = 100 + nextRandom(100) / 1.123;
+
+        return ignite.binary()
+            .builder(DEPOSIT)
+            .setField(ID, id)
+            .setField(TRADER_ID, traderId)
+            .setField(TRADER_LINK, num)
+            .setField(BALANCE, new BigDecimal(startBalance))
+            .setField(MARGIN_RATE, new BigDecimal(0.1))
+            .setField(BALANCE_ON_DAY_OPEN, new BigDecimal(startBalance))
+            .build();
+    }
+
+    /**
+     * Building a map that contains mapping of node ID to a list of partitions stored on the node.
+     *
+     * @param ignite Node.
+     * @return Node to partitions map.
+     */
+    private Map<UUID, List<Integer>> traderCachePartitions(Ignite ignite) {
+        // Getting affinity for account cache.
+        Affinity<?> affinity = ignite.affinity(TRADER_CACHE);
+
+        // Building a list of all partitions numbers.
+        List<Integer> partNumbers = new ArrayList<>(affinity.partitions());
+
+        for (int i = 0; i < affinity.partitions(); i++)
+            partNumbers.add(i);
+
+        // Getting partition to node mapping.
+        Map<Integer, ClusterNode> partPerNodes = affinity.mapPartitionsToNodes(partNumbers);
+
+        // Building node to partitions mapping.
+        Map<UUID, List<Integer>> nodesToPart = new HashMap<>();
+
+        for (Map.Entry<Integer, ClusterNode> entry : partPerNodes.entrySet()) {
+            List<Integer> nodeParts = nodesToPart.get(entry.getValue().id());
+
+            if (nodeParts == null) {
+                nodeParts = new ArrayList<>();
+
+                nodesToPart.put(entry.getValue().id(), nodeParts);
+            }
+
+            nodeParts.add(entry.getKey());
+        }
+
+        return nodesToPart;
+    }
+
+    /**
+     * Closure for scan query executing.
+     */
+    private static class ScanQueryBroadcastClosure implements IgniteRunnable {
+        /**
+         * Ignite node.
+         */
+        @IgniteInstanceResource
+        private Ignite node;
+
+        /**
+         * Information about partition.
+         */
+        private final Map<UUID, List<Integer>> cachePart;
+
+        /** */
+        private final boolean checkBalance;
+
+        /**
+         * @param cachePart Partition by node for Ignite cache.
+         * @param checkBalance Check balance flag.
+         */
+        private ScanQueryBroadcastClosure(Map<UUID, List<Integer>> cachePart, boolean checkBalance) {
+            this.cachePart = cachePart;
+            this.checkBalance = checkBalance;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            try {
+                IgniteCache traders = node.cache(TRADER_CACHE).withKeepBinary();
+
+                IgniteCache<String, BinaryObject> depositCache = node.cache(DEPOSIT_CACHE).withKeepBinary();
+
+                // Getting a list of the partitions owned by this node.
+                List<Integer> myPartitions = cachePart.get(node.cluster().localNode().id());
+
+                for (Integer part : myPartitions) {
+                    ScanQuery scanQry = new ScanQuery();
+
+                    scanQry.setPartition(part);
+
+                    try (QueryCursor<Cache.Entry<String, BinaryObject>> cursor = traders.query(scanQry)) {
+                        for (Cache.Entry<String, BinaryObject> entry : cursor) {
+                            String traderId = entry.getKey();
+
+                            SqlFieldsQuery findDepositQry = new SqlFieldsQuery(FIND_DEPOSIT_SQL).setLocal(true);
+
+                            try (QueryCursor cursor1 = depositCache.query(findDepositQry.setArgs(traderId))) {
+                                for (Object obj : cursor1) {
+                                    List<String> depositIds = (List<String>)obj;
+
+                                    for (String depositId : depositIds) {
+                                        updateDeposit(depositCache, depositId);
+
+                                        checkDeposit(depositCache, depositId);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            catch (Exception e) {
+                throw new IgniteException(e);
+            }
+        }
+
+        /**
+         * @param depositCache Ignite cache of deposit.
+         * @param depositKey Key of deposit.
+         * @throws Exception If failed.
+         */
+        private void updateDeposit(final IgniteCache<String, BinaryObject> depositCache, final String depositKey)
+            throws Exception {
+            final IgniteCache histCache = node.cache(DEPOSIT_HISTORY_CACHE).withKeepBinary();
+
+            doInTransaction(node, PESSIMISTIC,
+                REPEATABLE_READ, new IgniteCallable<Object>() {
+                    @Override public Object call() throws Exception {
+                        BinaryObject deposit = depositCache.get(depositKey);
+
+                        BigDecimal amount = deposit.field(BALANCE);
+                        BigDecimal rate = deposit.field(MARGIN_RATE);
+
+                        BigDecimal newBalance = amount.multiply(rate.add(BigDecimal.ONE));
+
+                        deposit = deposit.toBuilder()
+                            .setField(BALANCE, newBalance)
+                            .build();
+
+                        SqlFieldsQuery findDepositHist = new SqlFieldsQuery(LAST_HISTORY_ROW_SQL).setLocal(true);
+
+                        try (QueryCursor cursor1 = histCache.query(findDepositHist.setArgs(depositKey))) {
+                            for (Object o: cursor1){
+                                // No-op.
+                            }
+                        }
+
+                        String depositHistKey = depositKey + "&histId=" + System.nanoTime();
+
+                        BinaryObject depositHistRow = node.binary().builder(OPERATION)
+                            .setField(ID, depositHistKey)
+                            .setField(DEPOSIT_ID, depositKey)
+                            .setField(BUSINESS_DAY, new Date())
+                            .setField(BALANCE, newBalance)
+                            .build();
+
+                        histCache.put(depositHistKey, depositHistRow);
+
+                        depositCache.put(depositKey, deposit);
+
+                        return null;
+                    }
+                });
+        }
+
+        /**
+         * @param depositCache Deposit cache.
+         * @param depositKey Deposit key.
+         */
+        private void checkDeposit(IgniteCache<String, BinaryObject> depositCache, String depositKey) {
+            IgniteCache histCache = node.cache(DEPOSIT_HISTORY_CACHE).withKeepBinary();
+
+            BinaryObject deposit = depositCache.get(depositKey);
+
+            BigDecimal startBalance = deposit.field(BALANCE_ON_DAY_OPEN);
+
+            BigDecimal balance = deposit.field(BALANCE);
+
+            BigDecimal rate = deposit.field(MARGIN_RATE);
+
+            BigDecimal expBalance;
+
+            SqlFieldsQuery findDepositHist = new SqlFieldsQuery(DEPOSIT_OPERATION_COUNT_SQL);
+
+            try (QueryCursor cursor1 = histCache.query(findDepositHist.setArgs(depositKey))) {
+                Long cnt = (Long)((ArrayList)cursor1.iterator().next()).get(0);
+
+                expBalance = startBalance.multiply(rate.add(BigDecimal.ONE).pow(cnt.intValue()));
+            }
+
+            expBalance = expBalance.setScale(2, BigDecimal.ROUND_DOWN);
+            balance = balance.setScale(2, BigDecimal.ROUND_DOWN);
+
+            if (checkBalance && !expBalance.equals(balance)) {
+                node.log().error("Deposit " + depositKey + " has incorrect balance "
+                    + balance + " when expected " + expBalance, null);
+
+                throw new IgniteException("Deposit " + depositKey + " has incorrect balance "
+                    + balance + " when expected " + expBalance);
+
+            }
+        }
+    }
+
+    /**
+     * @param max Max.
+     * @return Random value.
+     */
+    private static int nextRandom(int max) {
+        return ThreadLocalRandom.current().nextInt(max);
+    }
+
+    /**
+     * @param parentCfg Parent config.
+     * @return Configuration.
+     */
+    private static CacheConfiguration<Object, Object> getDepositHistoryCfg(
+        CacheConfiguration<Object, Object> parentCfg) {
+        CacheConfiguration<Object, Object> depositHistCfg = new CacheConfiguration<>(parentCfg);
+        depositHistCfg.setName(DEPOSIT_HISTORY_CACHE);
+
+        String strCls = String.class.getCanonicalName();
+        String dblCls = Double.class.getCanonicalName();
+        String dtCls = Date.class.getCanonicalName();
+
+        LinkedHashMap<String, String> qryFields = new LinkedHashMap<>();
+        qryFields.put(ID, strCls);
+        qryFields.put(DEPOSIT_ID, strCls);
+        qryFields.put(BUSINESS_DAY, dtCls);
+        qryFields.put(BALANCE, dblCls);
+
+        QueryEntity qryEntity = new QueryEntity();
+        qryEntity.setValueType(OPERATION);
+        qryEntity.setKeyType(strCls);
+        qryEntity.setFields(qryFields);
+        qryEntity.setIndexes(Arrays.asList(new QueryIndex(ID, true), new QueryIndex(DEPOSIT_ID, true)));
+
+        depositHistCfg.setQueryEntities(Collections.singleton(qryEntity));
+
+        return depositHistCfg;
+    }
+
+    /**
+     * @param parentCfg Parent config.
+     * @return Configuration.
+     */
+    private static CacheConfiguration<Object, Object> getDepositCfg(CacheConfiguration<Object, Object> parentCfg) {
+        CacheConfiguration<Object, Object> depositCfg = new CacheConfiguration<>(parentCfg);
+        depositCfg.setName(DEPOSIT_CACHE);
+
+        String strCls = String.class.getCanonicalName();
+        String dblCls = Double.class.getCanonicalName();
+        String intCls = Integer.class.getCanonicalName();
+
+        LinkedHashMap<String, String> qryFields = new LinkedHashMap<>();
+        qryFields.put(ID, strCls);
+        qryFields.put(TRADER_ID, strCls);
+        qryFields.put(TRADER_LINK, intCls);
+        qryFields.put(BALANCE, dblCls);
+        qryFields.put(MARGIN_RATE, dblCls);
+        qryFields.put(BALANCE_ON_DAY_OPEN, dblCls);
+
+        QueryEntity qryEntity = new QueryEntity();
+        qryEntity.setValueType(DEPOSIT);
+        qryEntity.setKeyType(strCls);
+        qryEntity.setFields(qryFields);
+        qryEntity.setIndexes(Collections.singleton(new QueryIndex(ID, false)));
+
+        depositCfg.setQueryEntities(Collections.singleton(qryEntity));
+        return depositCfg;
+    }
+
+    /**
+     * @param parentCfg Parent config.
+     * @return Configuration.
+     */
+    private static CacheConfiguration<Object, Object> getTraderCfg(CacheConfiguration<Object, Object> parentCfg) {
+        CacheConfiguration<Object, Object> traderCfg = new CacheConfiguration<>(parentCfg);
+        traderCfg.setName(TRADER_CACHE);
+
+        String strCls = String.class.getCanonicalName();
+
+        LinkedHashMap<String, String> qryFields = new LinkedHashMap<>();
+        qryFields.put(ID, strCls);
+        qryFields.put(FIRSTNAME, strCls);
+        qryFields.put(SECONDNAME, strCls);
+        qryFields.put(EMAIL, strCls);
+
+        QueryEntity qryEntity = new QueryEntity();
+        qryEntity.setValueType(TRADER);
+        qryEntity.setKeyType(strCls);
+        qryEntity.setFields(qryFields);
+
+        LinkedHashMap<String, Boolean> grpIdx = new LinkedHashMap<>();
+        grpIdx.put(FIRSTNAME, false);
+        grpIdx.put(SECONDNAME, false);
+
+        qryEntity.setIndexes(Arrays.asList(
+            new QueryIndex(ID, true),
+            new QueryIndex(grpIdx, QueryIndexType.FULLTEXT)
+        ));
+
+        traderCfg.setQueryEntities(Collections.singleton(qryEntity));
+        return traderCfg;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java
index a673a73..6d9cbb1 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java
@@ -21,10 +21,8 @@
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
-import java.lang.reflect.Field;
 import java.util.Collection;
 import java.util.Iterator;
-import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentMap;
@@ -41,21 +39,15 @@
 import org.apache.ignite.events.DiscoveryEvent;
 import org.apache.ignite.events.Event;
 import org.apache.ignite.events.EventType;
-import org.apache.ignite.internal.IgniteKernal;
-import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.IgniteCacheAbstractQuerySelfTest;
 import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.lang.GridCloseableIterator;
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testsuites.IgniteIgnore;
 import org.apache.ignite.transactions.Transaction;
-import org.springframework.util.ReflectionUtils;
 
 import static org.apache.ignite.cache.CacheMode.REPLICATED;
 import static org.apache.ignite.cache.CachePeekMode.ALL;
@@ -279,26 +271,6 @@
     }
 
     /**
-     * Returns private field {@code qryIters} of {@link GridCacheQueryManager} for the given grid.
-     *
-     * @param g Grid which {@link GridCacheQueryManager} should be observed.
-     * @return {@code qryIters} of {@link GridCacheQueryManager}.
-     */
-    private ConcurrentMap<UUID,
-        Map<Long, GridFutureAdapter<GridCloseableIterator<IgniteBiTuple<CacheKey, CacheValue>>>>>
-        distributedQueryManagerQueryItersMap(Ignite g) {
-        GridCacheContext ctx = ((IgniteKernal)g).internalCache().context();
-
-        Field qryItersField = ReflectionUtils.findField(ctx.queries().getClass(), "qryIters");
-
-        qryItersField.setAccessible(true);
-
-        return (ConcurrentMap<UUID,
-            Map<Long, GridFutureAdapter<GridCloseableIterator<IgniteBiTuple<CacheKey, CacheValue>>>>>)
-            ReflectionUtils.getField(qryItersField, ctx.queries());
-    }
-
-    /**
      * @throws Exception If test failed.
      */
     public void testToString() throws Exception {
@@ -375,10 +347,12 @@
 
             assertEquals(0, (int)q.iterator().next().getKey());
 
-            ConcurrentMap<UUID, ConcurrentMap<Long, ?>> map = U.field(((IgniteH2Indexing)U.field(U.field(
-                grid(0).context(), "qryProc"), "idx")).mapQueryExecutor(), "qryRess");
+            // Query for replicated cache was run on one of nodes.
+            ConcurrentMap<?, ?> mapNode1 = queryResultMap(0);
+            ConcurrentMap<?, ?> mapNode2 = queryResultMap(1);
+            ConcurrentMap<?, ?> mapNode3 = queryResultMap(2);
 
-            assertEquals(1, map.size());
+            assertEquals(1, mapNode1.size() + mapNode2.size() + mapNode3.size());
 
             final UUID nodeId = g.cluster().localNode().id();
 
@@ -397,7 +371,9 @@
 
             latch.await();
 
-            assertEquals(0, map.size());
+            assertEquals(0, mapNode1.size());
+            assertEquals(0, mapNode2.size());
+            assertEquals(0, mapNode3.size());
         }
         finally {
             stopGrid("client");
@@ -405,6 +381,14 @@
     }
 
     /**
+     * @param node Node index.
+     * @return Query results map.
+     */
+    private ConcurrentMap<?, ?> queryResultMap(int node) {
+        return U.field(((IgniteH2Indexing)U.field(grid(node).context().query(), "idx")).mapQueryExecutor(), "qryRess");
+    }
+
+    /**
      * @param cache Cache.
      * @throws Exception If check failed.
      */
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalFieldsQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalFieldsQuerySelfTest.java
index be1f196..462118f 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalFieldsQuerySelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalFieldsQuerySelfTest.java
@@ -18,6 +18,8 @@
 package org.apache.ignite.internal.processors.cache.local;
 
 import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.internal.IgniteEx;
 import org.apache.ignite.internal.processors.cache.IgniteCacheAbstractFieldsQuerySelfTest;
 
 import static org.apache.ignite.cache.CacheMode.LOCAL;
@@ -26,6 +28,10 @@
  * Tests for fields queries.
  */
 public class IgniteCacheLocalFieldsQuerySelfTest extends IgniteCacheAbstractFieldsQuerySelfTest {
+//    static {
+//        System.setProperty(IgniteSystemProperties.IGNITE_H2_DEBUG_CONSOLE, "1");
+//    }
+
     /** {@inheritDoc} */
     @Override protected CacheMode cacheMode() {
         return LOCAL;
@@ -35,4 +41,14 @@
     @Override protected int gridCount() {
         return 1;
     }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testInformationSchema() throws Exception {
+        IgniteEx ignite = grid(0);
+
+        ignite.cache(CACHE).query(
+            new SqlFieldsQuery("SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS").setLocal(true)).getAll();
+    }
 }
\ No newline at end of file
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java
index bcf8f9d..b4abbf6 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java
@@ -463,7 +463,7 @@
 
             F.println(res);
 
-            assert res.contains("/* PUBLIC.RANGE_INDEX */");
+            assertTrue(res.contains("/* PUBLIC.RANGE_INDEX */"));
         }
         finally {
             GridTestUtils.setFieldValue(spi, "log", oldLog);
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java
index 5c9c733..359c7fd 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteBinaryCacheQueryTestSuite2.java
@@ -19,6 +19,7 @@
 
 import junit.framework.TestSuite;
 import org.apache.ignite.internal.binary.BinaryMarshaller;
+import org.apache.ignite.internal.processors.cache.IgniteCacheQueriesLoadTest1;
 import org.apache.ignite.testframework.config.GridTestProperties;
 
 /**
@@ -34,6 +35,8 @@
 
         TestSuite suite = IgniteCacheQuerySelfTestSuite2.suite();
 
+        suite.addTestSuite(IgniteCacheQueriesLoadTest1.class);
+
         return suite;
     }
 }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java
index be7523f..f28da18 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java
@@ -27,6 +27,7 @@
 import org.apache.ignite.internal.processors.cache.CacheReplicatedQueryMetricsDistributedSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheReplicatedQueryMetricsLocalSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheScanPartitionQueryFallbackSelfTest;
+import org.apache.ignite.internal.processors.cache.CacheSqlQueryValueCopySelfTest;
 import org.apache.ignite.internal.processors.cache.GridCacheQueryIndexingDisabledSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheFieldsQueryNoDataSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheNoClassQuerySelfTest;
@@ -106,6 +107,7 @@
         // Other.
         suite.addTestSuite(CacheQueryNewClientSelfTest.class);
         suite.addTestSuite(CacheOffheapBatchIndexingSingleTypeTest.class);
+        suite.addTestSuite(CacheSqlQueryValueCopySelfTest.class);
 
         return suite;
     }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java
index 032dd3b..a865788 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java
@@ -63,6 +63,7 @@
 import org.apache.ignite.internal.processors.cache.query.continuous.IgniteCacheContinuousQueryClientTest;
 import org.apache.ignite.internal.processors.cache.query.continuous.IgniteCacheContinuousQueryClientTxReconnectTest;
 import org.apache.ignite.internal.processors.cache.query.continuous.IgniteCacheContinuousQueryImmutableEntryTest;
+import org.apache.ignite.internal.processors.cache.query.continuous.IgniteCacheContinuousQueryNoUnsubscribeTest;
 
 /**
  * Test suite for cache queries.
@@ -121,6 +122,7 @@
         suite.addTestSuite(CacheKeepBinaryIterationSwapEnabledTest.class);
         suite.addTestSuite(CacheKeepBinaryIterationNearEnabledTest.class);
         suite.addTestSuite(IgniteCacheContinuousQueryBackupQueueTest.class);
+        suite.addTestSuite(IgniteCacheContinuousQueryNoUnsubscribeTest.class);
 
         return suite;
     }
diff --git a/modules/kafka/src/main/java/org/apache/ignite/stream/kafka/connect/IgniteSinkConnector.java b/modules/kafka/src/main/java/org/apache/ignite/stream/kafka/connect/IgniteSinkConnector.java
index 9385920..3fbfd9c 100644
--- a/modules/kafka/src/main/java/org/apache/ignite/stream/kafka/connect/IgniteSinkConnector.java
+++ b/modules/kafka/src/main/java/org/apache/ignite/stream/kafka/connect/IgniteSinkConnector.java
@@ -22,6 +22,7 @@
 import java.util.List;
 import java.util.Map;
 import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.kafka.common.config.ConfigDef;
 import org.apache.kafka.common.utils.AppInfoParser;
 import org.apache.kafka.connect.connector.Task;
 import org.apache.kafka.connect.errors.ConnectException;
@@ -34,6 +35,9 @@
     /** Sink properties. */
     private Map<String, String> configProps;
 
+    /** Expected configurations. */
+    private static final ConfigDef CONFIG_DEF = new ConfigDef();
+
     /** {@inheritDoc} */
     @Override public String version() {
         return AppInfoParser.getVersion();
@@ -88,4 +92,9 @@
     @Override public void stop() {
         // No-op.
     }
+
+    /** {@inheritDoc} */
+    @Override public ConfigDef config() {
+        return CONFIG_DEF;
+    }
 }
diff --git a/modules/kafka/src/main/java/org/apache/ignite/stream/kafka/connect/IgniteSourceConnector.java b/modules/kafka/src/main/java/org/apache/ignite/stream/kafka/connect/IgniteSourceConnector.java
index 59e2ed0..986888e 100644
--- a/modules/kafka/src/main/java/org/apache/ignite/stream/kafka/connect/IgniteSourceConnector.java
+++ b/modules/kafka/src/main/java/org/apache/ignite/stream/kafka/connect/IgniteSourceConnector.java
@@ -22,6 +22,7 @@
 import java.util.List;
 import java.util.Map;
 import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.kafka.common.config.ConfigDef;
 import org.apache.kafka.common.utils.AppInfoParser;
 import org.apache.kafka.connect.connector.Task;
 import org.apache.kafka.connect.errors.ConnectException;
@@ -36,6 +37,9 @@
     /** Source properties. */
     private Map<String, String> configProps;
 
+    /** Expected configurations. */
+    private static final ConfigDef CONFIG_DEF = new ConfigDef();
+
     /** {@inheritDoc} */
     @Override public String version() {
         return AppInfoParser.getVersion();
@@ -78,4 +82,9 @@
     @Override public void stop() {
         // No-op.
     }
+
+    /** {@inheritDoc} */
+    @Override public ConfigDef config() {
+        return CONFIG_DEF;
+    }
 }
diff --git a/modules/kafka/src/test/java/org/apache/ignite/stream/kafka/connect/IgniteSinkConnectorTest.java b/modules/kafka/src/test/java/org/apache/ignite/stream/kafka/connect/IgniteSinkConnectorTest.java
index 1814c69..efa2fa2 100644
--- a/modules/kafka/src/test/java/org/apache/ignite/stream/kafka/connect/IgniteSinkConnectorTest.java
+++ b/modules/kafka/src/test/java/org/apache/ignite/stream/kafka/connect/IgniteSinkConnectorTest.java
@@ -32,6 +32,7 @@
 import org.apache.ignite.stream.kafka.TestKafkaBroker;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.utils.SystemTime;
 import org.apache.kafka.common.utils.Utils;
 import org.apache.kafka.connect.runtime.ConnectorConfig;
 import org.apache.kafka.connect.runtime.Herder;
@@ -40,12 +41,12 @@
 import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
 import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
 import org.apache.kafka.connect.runtime.standalone.StandaloneHerder;
+import org.apache.kafka.connect.sink.SinkConnector;
 import org.apache.kafka.connect.storage.OffsetBackingStore;
 import org.apache.kafka.connect.util.Callback;
 import org.apache.kafka.connect.util.FutureCallback;
 
 import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_PUT;
-import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.mock;
 
 /**
@@ -59,7 +60,7 @@
     private static final String CACHE_NAME = "testCache";
 
     /** Test topics. */
-    private static final String[] TOPICS = {"test1", "test2"};
+    private static final String[] TOPICS = {"sink-test1", "sink-test2"};
 
     /** Kafka partition. */
     private static final int PARTITIONS = 3;
@@ -67,6 +68,9 @@
     /** Kafka replication factor. */
     private static final int REPLICATION_FACTOR = 1;
 
+    /** Worker id. */
+    private static final String WORKER_ID = "workerId";
+
     /** Test Kafka broker. */
     private TestKafkaBroker kafkaBroker;
 
@@ -96,9 +100,9 @@
         WorkerConfig workerCfg = new StandaloneConfig(makeWorkerProps());
 
         OffsetBackingStore offBackingStore = mock(OffsetBackingStore.class);
-        offBackingStore.configure(anyObject(Map.class));
+        offBackingStore.configure(workerCfg);
 
-        worker = new Worker(workerCfg, offBackingStore);
+        worker = new Worker(WORKER_ID, new SystemTime(), workerCfg, offBackingStore);
         worker.start();
 
         herder = new StandaloneHerder(worker);
@@ -211,7 +215,7 @@
     private Map<String, String> makeSinkProps(String topics) {
         Map<String, String> props = new HashMap<>();
 
-        props.put(ConnectorConfig.TOPICS_CONFIG, topics);
+        props.put(SinkConnector.TOPICS_CONFIG, topics);
         props.put(ConnectorConfig.TASKS_MAX_CONFIG, "2");
         props.put(ConnectorConfig.NAME_CONFIG, "test-sink-connector");
         props.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, IgniteSinkConnector.class.getName());
@@ -239,6 +243,7 @@
         props.put("key.converter.schemas.enable", "false");
         props.put("value.converter.schemas.enable", "false");
         props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBroker.getBrokerAddress());
+        props.put("offset.storage.file.filename", "/tmp/connect.offsets");
         // fast flushing for testing.
         props.put(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG, "10");
 
diff --git a/modules/kafka/src/test/java/org/apache/ignite/stream/kafka/connect/IgniteSourceConnectorTest.java b/modules/kafka/src/test/java/org/apache/ignite/stream/kafka/connect/IgniteSourceConnectorTest.java
index 7cdb09c..a3ce10e 100644
--- a/modules/kafka/src/test/java/org/apache/ignite/stream/kafka/connect/IgniteSourceConnectorTest.java
+++ b/modules/kafka/src/test/java/org/apache/ignite/stream/kafka/connect/IgniteSourceConnectorTest.java
@@ -26,13 +26,11 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.cache.CachePeekMode;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.events.CacheEvent;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.util.lang.GridAbsPredicate;
 import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.stream.kafka.TestKafkaBroker;
@@ -43,6 +41,7 @@
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.errors.WakeupException;
+import org.apache.kafka.common.utils.SystemTime;
 import org.apache.kafka.common.utils.Utils;
 import org.apache.kafka.connect.runtime.ConnectorConfig;
 import org.apache.kafka.connect.runtime.Herder;
@@ -68,7 +67,10 @@
     private static final String CACHE_NAME = "testCache";
 
     /** Test topics created by connector. */
-    private static final String[] TOPICS = {"test1", "test2"};
+    private static final String[] TOPICS = {"src-test1", "src-test2"};
+
+    /** Worker id. */
+    private static final String WORKER_ID = "workerId";
 
     /** Test Kafka broker. */
     private TestKafkaBroker kafkaBroker;
@@ -104,9 +106,9 @@
         WorkerConfig workerCfg = new StandaloneConfig(makeWorkerProps());
 
         MemoryOffsetBackingStore offBackingStore = new MemoryOffsetBackingStore();
-        offBackingStore.configure(workerCfg.originals());
+        offBackingStore.configure(workerCfg);
 
-        worker = new Worker(workerCfg, offBackingStore);
+        worker = new Worker(WORKER_ID, new SystemTime(), workerCfg, offBackingStore);
         worker.start();
 
         herder = new StandaloneHerder(worker);
@@ -280,7 +282,6 @@
                 }
             }, 20_000);
 
-
             info("Waiting for unexpected records for 5 secs.");
 
             assertFalse(GridTestUtils.waitForCondition(new GridAbsPredicate() {
@@ -345,6 +346,7 @@
         props.put("key.converter.schemas.enable", "false");
         props.put("value.converter.schemas.enable", "false");
         props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBroker.getBrokerAddress());
+        props.put("offset.storage.file.filename", "/tmp/connect.offsets");
         // fast flushing for testing.
         props.put(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG, "10");
 
diff --git a/modules/osgi-karaf/src/main/resources/features.xml b/modules/osgi-karaf/src/main/resources/features.xml
index 584429d..0f761f1 100644
--- a/modules/osgi-karaf/src/main/resources/features.xml
+++ b/modules/osgi-karaf/src/main/resources/features.xml
@@ -154,7 +154,7 @@
 
     <feature name="ignite-kafka" version="${project.version}" description="Apache Ignite :: Kafka">
         <details>
-            <![CDATA[The Apache Ignite Kafka module + dependencies. This module installs the Scala 2.1 library bundle.]]>
+            <![CDATA[The Apache Ignite Kafka module + dependencies. This module installs the Scala 2.11 library bundle.]]>
         </details>
         <feature prerequisite="true">wrap</feature>
         <bundle start="true" dependency="true">mvn:org.scala-lang/scala-library/${scala211.library.version}</bundle>
@@ -190,14 +190,14 @@
     </feature>   
 
     <feature name="ignite-rest-http" version="${project.version}" description="Apache Ignite :: REST HTTP">
-         <!-- NOTICE: XOM cannot be included by default due to an incompatible license; 
+         <!-- NOTICE: XOM cannot be included by default due to an incompatible license;
                       please review its license model and install the dependency manually if you agree. -->
         <details>
-            <![CDATA[The Apache Ignite REST HTTP module + dependencies. 
-            
+            <![CDATA[The Apache Ignite REST HTTP module + dependencies.
+
             Installing this feature will trigger the installation of the 'http' feature from the Apache Karaf distribution.
-            
-            NOTE: Before using this feature you must review the license of the XOM bundle and install it manually if you accept it: 
+
+            NOTE: Before using this feature you must review the license of the XOM bundle and install it manually if you accept it:
             install -s mvn:xom/xom/1.2.5]]>
         </details>
         <feature dependency="true">http</feature>
diff --git a/modules/platforms/dotnet/Apache.Ignite.AspNet/Apache.Ignite.AspNet.nuspec b/modules/platforms/dotnet/Apache.Ignite.AspNet/Apache.Ignite.AspNet.nuspec
index 2324faa..3b3b5ac 100644
--- a/modules/platforms/dotnet/Apache.Ignite.AspNet/Apache.Ignite.AspNet.nuspec
+++ b/modules/platforms/dotnet/Apache.Ignite.AspNet/Apache.Ignite.AspNet.nuspec
@@ -44,9 +44,6 @@
             
 More info: https://apacheignite-net.readme.io/
         </description>
-        <summary>
-            Apache Ignite ASP.NET Integration
-        </summary>
         <releaseNotes></releaseNotes>
         <copyright>Copyright 2016</copyright>
         <tags>OutputCacheProvider Apache Ignite In-Memory Distributed Computing SQL NoSQL Grid Map Reduce Cache</tags>
diff --git a/modules/platforms/dotnet/Apache.Ignite.AspNet/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.AspNet/Properties/AssemblyInfo.cs
index 2a7da67..0483c95 100644
--- a/modules/platforms/dotnet/Apache.Ignite.AspNet/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.AspNet/Properties/AssemblyInfo.cs
@@ -25,7 +25,7 @@
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2016")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Benchmarks/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.Benchmarks/Properties/AssemblyInfo.cs
index 85af146..9a3da85 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Benchmarks/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Benchmarks/Properties/AssemblyInfo.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 using System.Reflection;
 using System.Runtime.InteropServices;
@@ -23,7 +23,7 @@
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
@@ -33,4 +33,4 @@
 
 [assembly: AssemblyVersion("1.8.0.14218")]
 [assembly: AssemblyFileVersion("1.8.0.14218")]
-[assembly: AssemblyInformationalVersion("1.8.0")]
+[assembly: AssemblyInformationalVersion("1.8.0")]
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/Apache.Ignite.Core.Tests.NuGet.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/Apache.Ignite.Core.Tests.NuGet.csproj
index 335d711..4452ac7 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/Apache.Ignite.Core.Tests.NuGet.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/Apache.Ignite.Core.Tests.NuGet.csproj
@@ -95,16 +95,27 @@
       <HintPath>packages\Apache.Ignite.Log4Net.1.8.0\lib\net40\Apache.Ignite.Log4Net.dll</HintPath>
       <Private>True</Private>
     </Reference>
+    <Reference Include="Apache.Ignite.EntityFramework">
+      <SpecificVersion>False</SpecificVersion>
+      <HintPath>packages\Apache.Ignite.EntityFramework.1.8.0\lib\net40\Apache.Ignite.EntityFramework.dll</HintPath>
+      <Private>True</Private>
+    </Reference>
+    <Reference Include="EntityFramework, Version=6.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089, processorArchitecture=MSIL">
+      <HintPath>packages\EntityFramework.6.1.0\lib\net40\EntityFramework.dll</HintPath>
+      <SpecificVersion>False</SpecificVersion>
+      <Private>True</Private>
+    </Reference>
     <Reference Include="NLog, Version=4.0.0.0, Culture=neutral, PublicKeyToken=5120e14c03d0593c, processorArchitecture=MSIL">
       <HintPath>packages\NLog.4.3.7\lib\net40\NLog.dll</HintPath>
       <Private>True</Private>
     </Reference>
-    <Reference Include="nunit-console-runner">
-      <HintPath>..\libs\nunit-console-runner.dll</HintPath>
-    </Reference>
-    <Reference Include="nunit.framework, Version=2.6.4.14350, Culture=neutral, PublicKeyToken=96d09a1eb7f44a77, processorArchitecture=MSIL">
+    <Reference Include="nunit-console-runner, Version=2.6.3.0, Culture=neutral, PublicKeyToken=96d09a1eb7f44a77, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\libs\nunit.framework.dll</HintPath>
+      <HintPath>..\packages\NUnit.Runners.2.6.3\tools\lib\nunit-console-runner.dll</HintPath>
+    </Reference>
+    <Reference Include="nunit.framework, Version=2.6.3.0, Culture=neutral, PublicKeyToken=96d09a1eb7f44a77, processorArchitecture=MSIL">
+      <SpecificVersion>False</SpecificVersion>
+      <HintPath>..\packages\NUnit.Runners.2.6.3\tools\nunit.framework.dll</HintPath>
     </Reference>
     <Reference Include="Remotion.Linq, Version=2.0.0.0, Culture=neutral, PublicKeyToken=fee00910d6e5f53b, processorArchitecture=MSIL">
       <HintPath>packages\Remotion.Linq.2.0.1\lib\net40\Remotion.Linq.dll</HintPath>
@@ -126,6 +137,7 @@
     <Compile Include="AspNetTest.cs" />
     <Compile Include="ComputeTest.cs" />
     <Compile Include="SchemaTest.cs" />
+    <Compile Include="EntityFrameworkCacheTest.cs" />
     <Compile Include="StartupTest.cs" />
     <Compile Include="CacheTest.cs" />
     <Compile Include="Properties\AssemblyInfo.cs" />
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/EntityFrameworkCacheTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/EntityFrameworkCacheTest.cs
new file mode 100644
index 0000000..b4781ce
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/EntityFrameworkCacheTest.cs
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.Core.Tests.NuGet
+{
+    using Apache.Ignite.Core.Cache.Configuration;
+    using Apache.Ignite.EntityFramework;
+    using NUnit.Framework;
+
+    /// <summary>
+    /// Tests the EntityFramework integration.
+    /// </summary>
+    public class EntityFrameworkCacheTest
+    {
+        /// <summary>
+        /// Tests cache startup and basic operation.
+        /// </summary>
+        [Test]
+        public void TestStartupPutGet()
+        {
+            var cfg = new IgniteConfiguration
+            {
+                DiscoverySpi = TestUtil.GetLocalDiscoverySpi(),
+                GridName = "myGrid"
+            };
+            
+            // ReSharper disable once ObjectCreationAsStatement
+            new IgniteDbConfiguration(cfg,
+                new CacheConfiguration("efMetaCache") {AtomicityMode = CacheAtomicityMode.Transactional},
+                new CacheConfiguration("efDataCache"), null);
+
+            var ignite = Ignition.GetIgnite(cfg.GridName);
+            Assert.IsNotNull(ignite);
+
+            Assert.IsNotNull(ignite.GetCache<string, object>("efMetaCache"));
+            Assert.IsNotNull(ignite.GetCache<string, object>("efDataCache"));
+        }
+
+        /// <summary>
+        /// Test teardown.
+        /// </summary>
+        [TearDown]
+        public void TearDown()
+        {
+            Ignition.StopAll(true);
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/Properties/AssemblyInfo.cs
index 34fca37..e48b8fd 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/Properties/AssemblyInfo.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 using System.Reflection;
 using System.Runtime.InteropServices;
@@ -32,4 +32,4 @@
 
 [assembly: AssemblyVersion("1.8.0.14218")]
 [assembly: AssemblyFileVersion("1.8.0.14218")]
-[assembly: AssemblyInformationalVersion("1.8.0")]
+[assembly: AssemblyInformationalVersion("1.8.0")]
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/packages.config b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/packages.config
index 80454e0..a7c48f3 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/packages.config
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.NuGet/packages.config
@@ -22,8 +22,10 @@
   <package id="Apache.Ignite.NLog" version="1.8.0" targetFramework="net40" />
   <package id="Apache.Ignite.Log4Net" version="1.8.0" targetFramework="net40" />
   <package id="Apache.Ignite.Schema" version="1.8.0" targetFramework="net40" />
+  <package id="Apache.Ignite.EntityFramework" version="1.8.0" targetFramework="net40" />
   <package id="NLog" version="4.3.7" targetFramework="net40" />
   <package id="NUnit.Runners" version="2.6.3" targetFramework="net40" />
   <package id="Remotion.Linq" version="2.0.1" targetFramework="net40" />
   <package id="log4net" version="2.0.5" targetFramework="net40" />
+  <package id="EntityFramework" version="6.1.0" targetFramework="net40" />
 </packages>
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.TestDll/Apache.Ignite.Core.Tests.TestDll.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.TestDll/Apache.Ignite.Core.Tests.TestDll.csproj
index 031f6cc..b33023a 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.TestDll/Apache.Ignite.Core.Tests.TestDll.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.TestDll/Apache.Ignite.Core.Tests.TestDll.csproj
@@ -33,11 +33,6 @@
   <ItemGroup>
     <Reference Include="System" />
     <Reference Include="System.Core" />
-    <Reference Include="System.Xml.Linq" />
-    <Reference Include="System.Data.DataSetExtensions" />
-    <Reference Include="Microsoft.CSharp" />
-    <Reference Include="System.Data" />
-    <Reference Include="System.Xml" />
   </ItemGroup>
   <ItemGroup>
     <Compile Include="Properties\AssemblyInfo.cs" />
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.TestDll/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.TestDll/Properties/AssemblyInfo.cs
index 4aa03f1..3e7b663 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests.TestDll/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests.TestDll/Properties/AssemblyInfo.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 using System.Reflection;
 using System.Runtime.InteropServices;
@@ -23,7 +23,7 @@
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
index a9de399..ccd7cc4 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
@@ -49,9 +49,9 @@
       <SpecificVersion>False</SpecificVersion>
       <HintPath>..\packages\NUnit.Runners.2.6.3\tools\lib\nunit-console-runner.dll</HintPath>
     </Reference>
-    <Reference Include="nunit.framework, Version=2.6.4.14350, Culture=neutral, PublicKeyToken=96d09a1eb7f44a77, processorArchitecture=MSIL">
+    <Reference Include="nunit.framework, Version=2.6.3.0, Culture=neutral, PublicKeyToken=96d09a1eb7f44a77, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\libs\nunit.framework.dll</HintPath>
+      <HintPath>..\packages\NUnit.Runners.2.6.3\tools\nunit.framework.dll</HintPath>
     </Reference>
     <Reference Include="System" />
     <Reference Include="System.configuration" />
@@ -64,6 +64,8 @@
   <ItemGroup>
     <Compile Include="Binary\BinaryReaderWriterTest.cs" />
     <Compile Include="Binary\IO\BinaryStreamsTest.cs" />
+    <Compile Include="Binary\JavaTypeMappingTest.cs" />
+    <Compile Include="Binary\TypeResolverTest.cs" />
     <Compile Include="Cache\Affinity\AffinityKeyTest.cs" />
     <Compile Include="Cache\Affinity\AffinityTopologyVersionTest.cs" />
     <Compile Include="Cache\CacheResultTest.cs" />
@@ -185,7 +187,6 @@
     <Compile Include="Services\ServiceProxyTest.cs" />
     <Compile Include="Services\ServicesAsyncWrapper.cs" />
     <Compile Include="TestRunner.cs" />
-    <Compile Include="TypeResolverTest.cs" />
     <Compile Include="WindowsServiceTest.cs" />
   </ItemGroup>
   <ItemGroup>
@@ -279,6 +280,12 @@
     <Content Include="Config\Log\custom-log.xml">
       <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
     </Content>
+    <Content Include="Config\Log\dotnet-log4j.xml">
+      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
+    </Content>
+    <Content Include="Config\Log\custom-log.xml">
+      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
+    </Content>
     <Content Include="Config\spring-test.xml">
       <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
     </Content>
@@ -337,6 +344,7 @@
     <None Include="Config\Apache.Ignite.exe.config.test2">
       <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
     </None>
+    <None Include="packages.config" />
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
   <PropertyGroup Condition="'$(Platform)' != 'AnyCPU'">
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/JavaTypeMappingTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/JavaTypeMappingTest.cs
new file mode 100644
index 0000000..10f7cb7
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/JavaTypeMappingTest.cs
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.Core.Tests.Binary
+{
+    using System;
+    using Apache.Ignite.Core.Impl.Binary;
+    using NUnit.Framework;
+
+    /// <summary>
+    /// Tests the type mapping between .NET and Java.
+    /// </summary>
+    public class JavaTypeMappingTest
+    {
+        /// <summary>
+        /// Tests .NET to Java type mapping.
+        /// </summary>
+        [Test]
+        public void TestDotNetToJavaMapping()
+        {
+            Assert.AreEqual("java.lang.Boolean", JavaTypes.GetJavaTypeName(typeof(bool)));
+            Assert.AreEqual("java.lang.Boolean", JavaTypes.GetJavaTypeName(typeof(bool?)));
+
+            Assert.AreEqual("java.lang.Byte", JavaTypes.GetJavaTypeName(typeof(byte)));
+            Assert.AreEqual("java.lang.Byte", JavaTypes.GetJavaTypeName(typeof(byte?)));
+            Assert.AreEqual("java.lang.Byte", JavaTypes.GetJavaTypeName(typeof(sbyte)));
+            Assert.AreEqual("java.lang.Byte", JavaTypes.GetJavaTypeName(typeof(sbyte?)));
+
+            Assert.AreEqual("java.lang.Short", JavaTypes.GetJavaTypeName(typeof(short)));
+            Assert.AreEqual("java.lang.Short", JavaTypes.GetJavaTypeName(typeof(short?)));
+            Assert.AreEqual("java.lang.Short", JavaTypes.GetJavaTypeName(typeof(ushort)));
+            Assert.AreEqual("java.lang.Short", JavaTypes.GetJavaTypeName(typeof(ushort?)));
+
+            Assert.AreEqual("java.lang.Integer", JavaTypes.GetJavaTypeName(typeof(int)));
+            Assert.AreEqual("java.lang.Integer", JavaTypes.GetJavaTypeName(typeof(int?)));
+            Assert.AreEqual("java.lang.Integer", JavaTypes.GetJavaTypeName(typeof(uint)));
+            Assert.AreEqual("java.lang.Integer", JavaTypes.GetJavaTypeName(typeof(uint?)));
+
+            Assert.AreEqual("java.lang.Long", JavaTypes.GetJavaTypeName(typeof(long)));
+            Assert.AreEqual("java.lang.Long", JavaTypes.GetJavaTypeName(typeof(long?)));
+            Assert.AreEqual("java.lang.Long", JavaTypes.GetJavaTypeName(typeof(ulong)));
+            Assert.AreEqual("java.lang.Long", JavaTypes.GetJavaTypeName(typeof(ulong?)));
+
+            Assert.AreEqual("java.lang.Float", JavaTypes.GetJavaTypeName(typeof(float)));
+            Assert.AreEqual("java.lang.Float", JavaTypes.GetJavaTypeName(typeof(float?)));
+
+            Assert.AreEqual("java.lang.Double", JavaTypes.GetJavaTypeName(typeof(double)));
+            Assert.AreEqual("java.lang.Double", JavaTypes.GetJavaTypeName(typeof(double?)));
+
+            Assert.AreEqual("java.math.BigDecimal", JavaTypes.GetJavaTypeName(typeof(decimal)));
+            Assert.AreEqual("java.math.BigDecimal", JavaTypes.GetJavaTypeName(typeof(decimal?)));
+
+            Assert.AreEqual("java.lang.Character", JavaTypes.GetJavaTypeName(typeof(char)));
+            Assert.AreEqual("java.lang.Character", JavaTypes.GetJavaTypeName(typeof(char?)));
+
+            Assert.AreEqual("java.lang.String", JavaTypes.GetJavaTypeName(typeof(string)));
+
+            Assert.AreEqual("java.sql.Timestamp", JavaTypes.GetJavaTypeName(typeof(DateTime)));
+            Assert.AreEqual("java.sql.Timestamp", JavaTypes.GetJavaTypeName(typeof(DateTime?)));
+
+            Assert.AreEqual("java.util.UUID", JavaTypes.GetJavaTypeName(typeof(Guid)));
+            Assert.AreEqual("java.util.UUID", JavaTypes.GetJavaTypeName(typeof(Guid?)));
+        }
+
+        /// <summary>
+        /// Tests the Java to .NET type mapping.
+        /// </summary>
+        [Test]
+        public void TestJavaToDotNetMapping()
+        {
+            Assert.AreEqual(typeof(bool), JavaTypes.GetDotNetType("java.lang.Boolean"));
+            Assert.AreEqual(typeof(bool), JavaTypes.GetDotNetType("boolean"));
+
+            Assert.AreEqual(typeof(byte), JavaTypes.GetDotNetType("java.lang.Byte"));
+            Assert.AreEqual(typeof(byte), JavaTypes.GetDotNetType("byte"));
+
+            Assert.AreEqual(typeof(short), JavaTypes.GetDotNetType("java.lang.Short"));
+            Assert.AreEqual(typeof(short), JavaTypes.GetDotNetType("short"));
+
+            Assert.AreEqual(typeof(int), JavaTypes.GetDotNetType("java.lang.Integer"));
+            Assert.AreEqual(typeof(int), JavaTypes.GetDotNetType("int"));
+
+            Assert.AreEqual(typeof(long), JavaTypes.GetDotNetType("java.lang.Long"));
+            Assert.AreEqual(typeof(long), JavaTypes.GetDotNetType("long"));
+
+            Assert.AreEqual(typeof(float), JavaTypes.GetDotNetType("java.lang.Float"));
+            Assert.AreEqual(typeof(float), JavaTypes.GetDotNetType("float"));
+
+            Assert.AreEqual(typeof(double), JavaTypes.GetDotNetType("java.lang.Double"));
+            Assert.AreEqual(typeof(double), JavaTypes.GetDotNetType("double"));
+
+            Assert.AreEqual(typeof(char), JavaTypes.GetDotNetType("java.lang.Character"));
+            Assert.AreEqual(typeof(char), JavaTypes.GetDotNetType("char"));
+
+            Assert.AreEqual(typeof(decimal), JavaTypes.GetDotNetType("java.math.BigDecimal"));
+            Assert.AreEqual(typeof(string), JavaTypes.GetDotNetType("java.lang.String"));
+            Assert.AreEqual(typeof(DateTime), JavaTypes.GetDotNetType("java.sql.Timestamp"));
+            Assert.AreEqual(typeof(Guid), JavaTypes.GetDotNetType("java.util.UUID"));
+
+        }
+
+        /// <summary>
+        /// Tests the indirect mapping check.
+        /// </summary>
+        [Test]
+        public void TestIndirectMappingCheck()
+        {
+            Assert.AreEqual(typeof(bool), JavaTypes.GetDirectlyMappedType(typeof(bool)));
+            Assert.AreEqual(typeof(bool?), JavaTypes.GetDirectlyMappedType(typeof(bool?)));
+            Assert.AreEqual(typeof(byte), JavaTypes.GetDirectlyMappedType(typeof(byte)));
+            Assert.AreEqual(typeof(byte?), JavaTypes.GetDirectlyMappedType(typeof(byte?)));
+            Assert.AreEqual(typeof(char), JavaTypes.GetDirectlyMappedType(typeof(char)));
+            Assert.AreEqual(typeof(char?), JavaTypes.GetDirectlyMappedType(typeof(char?)));
+            Assert.AreEqual(typeof(DateTime), JavaTypes.GetDirectlyMappedType(typeof(DateTime)));
+            Assert.AreEqual(typeof(DateTime?), JavaTypes.GetDirectlyMappedType(typeof(DateTime?)));
+            Assert.AreEqual(typeof(decimal), JavaTypes.GetDirectlyMappedType(typeof(decimal)));
+            Assert.AreEqual(typeof(decimal?), JavaTypes.GetDirectlyMappedType(typeof(decimal?)));
+            Assert.AreEqual(typeof(double), JavaTypes.GetDirectlyMappedType(typeof(double)));
+            Assert.AreEqual(typeof(double?), JavaTypes.GetDirectlyMappedType(typeof(double?)));
+            Assert.AreEqual(typeof(float), JavaTypes.GetDirectlyMappedType(typeof(float)));
+            Assert.AreEqual(typeof(float?), JavaTypes.GetDirectlyMappedType(typeof(float?)));
+            Assert.AreEqual(typeof(Guid), JavaTypes.GetDirectlyMappedType(typeof(Guid)));
+            Assert.AreEqual(typeof(Guid?), JavaTypes.GetDirectlyMappedType(typeof(Guid?)));
+            Assert.AreEqual(typeof(int), JavaTypes.GetDirectlyMappedType(typeof(int)));
+            Assert.AreEqual(typeof(int?), JavaTypes.GetDirectlyMappedType(typeof(int?)));
+            Assert.AreEqual(typeof(long), JavaTypes.GetDirectlyMappedType(typeof(long)));
+            Assert.AreEqual(typeof(long?), JavaTypes.GetDirectlyMappedType(typeof(long?)));
+            Assert.AreEqual(typeof(byte), JavaTypes.GetDirectlyMappedType(typeof(sbyte)));
+            Assert.AreEqual(typeof(byte), JavaTypes.GetDirectlyMappedType(typeof(sbyte?)));
+            Assert.AreEqual(typeof(short), JavaTypes.GetDirectlyMappedType(typeof(short)));
+            Assert.AreEqual(typeof(short?), JavaTypes.GetDirectlyMappedType(typeof(short?)));
+            Assert.AreEqual(typeof(string), JavaTypes.GetDirectlyMappedType(typeof(string)));
+            Assert.AreEqual(typeof(int), JavaTypes.GetDirectlyMappedType(typeof(uint)));
+            Assert.AreEqual(typeof(int), JavaTypes.GetDirectlyMappedType(typeof(uint?)));
+            Assert.AreEqual(typeof(long), JavaTypes.GetDirectlyMappedType(typeof(ulong)));
+            Assert.AreEqual(typeof(long), JavaTypes.GetDirectlyMappedType(typeof(ulong?)));
+            Assert.AreEqual(typeof(short), JavaTypes.GetDirectlyMappedType(typeof(ushort)));
+            Assert.AreEqual(typeof(short), JavaTypes.GetDirectlyMappedType(typeof(ushort?)));
+
+            // Arbitrary type.
+            Assert.AreEqual(typeof(JavaTypeMappingTest), JavaTypes.GetDirectlyMappedType(typeof(JavaTypeMappingTest)));
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TypeResolverTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/TypeResolverTest.cs
similarity index 98%
rename from modules/platforms/dotnet/Apache.Ignite.Core.Tests/TypeResolverTest.cs
rename to modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/TypeResolverTest.cs
index a95ecd7..7d37584 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TypeResolverTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/TypeResolverTest.cs
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-namespace Apache.Ignite.Core.Tests
+namespace Apache.Ignite.Core.Tests.Binary
 {
     using System;
     using System.Collections.Generic;
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionSpringTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionSpringTest.cs
index 8d118be..48c5814 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionSpringTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionSpringTest.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 // ReSharper disable UnusedAutoPropertyAccessor.Local
 // ReSharper disable UnusedMember.Local
@@ -162,13 +162,13 @@
                 Assert.Greater(basePart, -1);
                 Assert.Less(basePart, Partitions);
 
-                var longKey = (long) key;
+                var longKey = (long)key;
                 int res;
 
                 if (PredefinedParts.TryGetValue(longKey, out res))
                     return res;
 
-                return (int) (longKey * 2 % 5);
+                return (int)(longKey * 2 % 5);
             }
 
             public override IEnumerable<IEnumerable<IClusterNode>> AssignPartitions(AffinityFunctionContext context)
@@ -181,4 +181,4 @@
             }
         }
     }
-}
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheAbstractTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheAbstractTest.cs
index 63e236a..6910c54 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheAbstractTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheAbstractTest.cs
@@ -956,10 +956,17 @@
             Assert.IsFalse(cache0.ContainsKey(key0));
             Assert.IsFalse(cache0.ContainsKey(key1));
 
+            // Test sliding expiration
             cache0.Put(key0, key0);
             cache0.Put(key1, key1);
-            cache.Get(key0); 
-            cache.Get(key1);
+            for (var i = 0; i < 3; i++)
+            {
+                Thread.Sleep(50);
+
+                // Prolong expiration by touching the entry
+                cache.Get(key0);
+                cache.Get(key1);
+            }
             Assert.IsTrue(cache0.ContainsKey(key0));
             Assert.IsTrue(cache0.ContainsKey(key1));
             Thread.Sleep(200);
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheLinqTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheLinqTest.cs
index 6d3af67..1ac7fa7 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheLinqTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheLinqTest.cs
@@ -28,7 +28,6 @@
 {
     using System;
     using System.Collections;
-    using System.Diagnostics.CodeAnalysis;
     using System.Linq;
     using System.Linq.Expressions;
     using System.Text.RegularExpressions;
@@ -1171,6 +1170,11 @@
             Assert.IsFalse(fq.EnableDistributedJoins);
             Assert.IsTrue(fq.EnforceJoinOrder);
 
+            var str = query.ToString();
+            Assert.AreEqual("CacheQueryable [CacheName=, TableName=Person, Query=SqlFieldsQuery [Sql=select " +
+                            "_T0._key, _T0._val from \"\".Person as _T0 where (_T0._key > ?), Arguments=[10], " +
+                            "Local=True, PageSize=999, EnableDistributedJoins=False, EnforceJoinOrder=True]]", str);
+
             // Check fields query
             var fieldsQuery = (ICacheQueryable) cache.AsCacheQueryable().Select(x => x.Value.Name);
 
@@ -1184,11 +1188,24 @@
             Assert.IsFalse(fq.EnableDistributedJoins);
             Assert.IsFalse(fq.EnforceJoinOrder);
 
+            str = fieldsQuery.ToString();
+            Assert.AreEqual("CacheQueryable [CacheName=, TableName=Person, Query=SqlFieldsQuery [Sql=select " +
+                            "_T0.Name from \"\".Person as _T0, Arguments=[], Local=False, PageSize=1024, " +
+                            "EnableDistributedJoins=False, EnforceJoinOrder=False]]", str);
+            
             // Check distributed joins flag propagation
             var distrQuery = cache.AsCacheQueryable(new QueryOptions {EnableDistributedJoins = true})
-                .Where(x => x.Key > 10);
+                .Where(x => x.Key > 10 && x.Value.Age > 20 && x.Value.Name.Contains("x"));
+
             query = (ICacheQueryable) distrQuery;
+
             Assert.IsTrue(query.GetFieldsQuery().EnableDistributedJoins);
+
+            str = distrQuery.ToString();
+            Assert.AreEqual("CacheQueryable [CacheName=, TableName=Person, Query=SqlFieldsQuery [Sql=select " +
+                            "_T0._key, _T0._val from \"\".Person as _T0 where (((_T0._key > ?) and (_T0.age1 > ?)) " +
+                            "and (_T0.Name like \'%\' || ? || \'%\') ), Arguments=[10, 20, x], Local=False, " +
+                            "PageSize=1024, EnableDistributedJoins=True, EnforceJoinOrder=False]]", str);
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesCodeConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesCodeConfigurationTest.cs
index d5f98ac..7cb999f 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesCodeConfigurationTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesCodeConfigurationTest.cs
@@ -150,6 +150,11 @@
                     Assert.AreEqual(2, cursor.GetAll().Single().Key);
                 }
 
+                using (var cursor = cache.Query(new SqlQuery(typeof(AttributeQueryPerson), "salary > ?", 10)))
+                {
+                    Assert.AreEqual(2, cursor.GetAll().Single().Key);
+                }
+
                 using (var cursor = cache.Query(new SqlQuery(typeof(AttributeQueryPerson), "Country = ?", "USA")))
                 {
                     Assert.AreEqual(1, cursor.GetAll().Single().Key);
@@ -186,6 +191,7 @@
             {
                 Name = name;
                 Age = age;
+                Salary = age;
             }
 
             /// <summary>
@@ -214,6 +220,12 @@
             /// </value>
             [QuerySqlField]
             public AttributeQueryAddress Address { get; set; }
+
+            /// <summary>
+            /// Gets or sets the salary.
+            /// </summary>
+            [QuerySqlField]
+            public decimal? Salary { get; set; }
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs
index a8ffe13..9d6f8fb 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheQueriesTest.cs
@@ -123,32 +123,20 @@
         }
 
         /// <summary>
-        /// 
+        /// Gets the ignite.
         /// </summary>
-        /// <param name="idx"></param>
-        /// <returns></returns>
-        private IIgnite GetIgnite(int idx)
+        private static IIgnite GetIgnite()
         {
-            return Ignition.GetIgnite("grid-" + idx);
-        }
-
-        /// <summary>
-        /// 
-        /// </summary>
-        /// <param name="idx"></param>
-        /// <returns></returns>
-        private ICache<int, QueryPerson> Cache(int idx)
-        {
-            return GetIgnite(idx).GetCache<int, QueryPerson>(CacheName);
+            return Ignition.GetIgnite("grid-0");
         }
 
         /// <summary>
         /// 
         /// </summary>
         /// <returns></returns>
-        private ICache<int, QueryPerson> Cache()
+        private static ICache<int, QueryPerson> Cache()
         {
-            return Cache(0);
+            return GetIgnite().GetCache<int, QueryPerson>(CacheName);
         }
 
         /// <summary>
@@ -236,8 +224,7 @@
             Cache().Put(4, new QueryPerson("Unknown", 60));
 
             // 1. Empty result set.
-            using (
-                IQueryCursor<ICacheEntry<int, QueryPerson>> cursor =
+            using (IQueryCursor<ICacheEntry<int, QueryPerson>> cursor =
                     Cache().Query(new SqlQuery(typeof(QueryPerson), "age = 100")))
             {
                 IEnumerator<ICacheEntry<int, QueryPerson>> e = cursor.GetEnumerator();
@@ -251,6 +238,8 @@
                     { ICacheEntry<int, QueryPerson> entry = e.Current; });
 
                 Assert.Throws<NotSupportedException>(() => e.Reset());
+
+                e.Dispose();
             }
 
             SqlQuery qry = new SqlQuery(typeof (QueryPerson), "age < 60");
@@ -446,7 +435,7 @@
         [Test]
         public void TestIndexingDisabledError()
         {
-            var cache = GetIgnite(0).GetOrCreateCache<int, QueryPerson>("nonindexed_cache");
+            var cache = GetIgnite().GetOrCreateCache<int, QueryPerson>("nonindexed_cache");
 
             var queries = new QueryBase[]
             {
@@ -552,14 +541,14 @@
         [Test]
         public void TestDistributedJoins()
         {
-            var cache = GetIgnite(0).GetOrCreateCache<int, QueryPerson>(
+            var cache = GetIgnite().GetOrCreateCache<int, QueryPerson>(
                 new CacheConfiguration("replicatedCache")
                 {
                     QueryEntities = new[]
                     {
                         new QueryEntity(typeof(int), typeof(QueryPerson))
                         {
-                            Fields = new[] {new QueryField("age", typeof(int))}
+                            Fields = new[] {new QueryField("age", "int")}
                         }
                     }
                 });
@@ -585,6 +574,18 @@
         }
 
         /// <summary>
+        /// Tests the get configuration.
+        /// </summary>
+        [Test]
+        public void TestGetConfiguration()
+        {
+            var entity = Cache().GetConfiguration().QueryEntities.Single();
+
+            Assert.AreEqual(typeof(int), entity.Fields.Single(x => x.Name == "age").FieldType);
+            Assert.AreEqual(typeof(string), entity.Fields.Single(x => x.Name == "name").FieldType);
+        }
+
+        /// <summary>
         /// Validates the query results.
         /// </summary>
         /// <param name="cache">Cache.</param>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Compute/ComputeApiTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Compute/ComputeApiTest.cs
index 1e0287f..71a4718 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Compute/ComputeApiTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Compute/ComputeApiTest.cs
@@ -30,7 +30,6 @@
     using Apache.Ignite.Core.Common;
     using Apache.Ignite.Core.Compute;
     using Apache.Ignite.Core.Impl;
-    using Apache.Ignite.Core.Impl.Common;
     using Apache.Ignite.Core.Resource;
     using NUnit.Framework;
 
@@ -1234,33 +1233,23 @@
         /// Create configuration.
         /// </summary>
         /// <param name="path">XML config path.</param>
-        private IgniteConfiguration Configuration(string path)
+        private static IgniteConfiguration Configuration(string path)
         {
-            IgniteConfiguration cfg = new IgniteConfiguration();
-
-            BinaryConfiguration portCfg = new BinaryConfiguration();
-
-            var portTypeCfgs = new List<BinaryTypeConfiguration>
+            return new IgniteConfiguration(TestUtils.GetTestConfiguration())
             {
-                new BinaryTypeConfiguration(typeof (PlatformComputeBinarizable)),
-                new BinaryTypeConfiguration(typeof (PlatformComputeNetBinarizable)),
-                new BinaryTypeConfiguration(JavaBinaryCls),
-                new BinaryTypeConfiguration(typeof(PlatformComputeEnum)),
-                new BinaryTypeConfiguration(typeof(InteropComputeEnumFieldTest))
+                BinaryConfiguration = new BinaryConfiguration
+                {
+                    TypeConfigurations = new List<BinaryTypeConfiguration>
+                    {
+                        new BinaryTypeConfiguration(typeof(PlatformComputeBinarizable)),
+                        new BinaryTypeConfiguration(typeof(PlatformComputeNetBinarizable)),
+                        new BinaryTypeConfiguration(JavaBinaryCls),
+                        new BinaryTypeConfiguration(typeof(PlatformComputeEnum)),
+                        new BinaryTypeConfiguration(typeof(InteropComputeEnumFieldTest))
+                    }
+                },
+                SpringConfigUrl = path
             };
-
-
-            portCfg.TypeConfigurations = portTypeCfgs;
-
-            cfg.BinaryConfiguration = portCfg;
-
-            cfg.JvmClasspath = Classpath.CreateClasspath(cfg, true);
-
-            cfg.JvmOptions = TestUtils.TestJavaOptions();
-
-            cfg.SpringConfigUrl = path;
-
-            return cfg;
         }
     }
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-query.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-query.xml
index b0dc48f..dd5d4d9 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-query.xml
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/cache-query.xml
@@ -57,24 +57,34 @@
                     <property name="atomicityMode" value="TRANSACTIONAL"/>
                     <property name="writeSynchronizationMode" value="FULL_SYNC"/>
 
-                    <property name="typeMetadata">
+                    <property name="queryEntities">
                         <list>
-                            <bean class="org.apache.ignite.cache.CacheTypeMetadata">
+                            <bean class="org.apache.ignite.cache.QueryEntity">
                                 <property name="valueType" value="QueryPerson"/>
-                                <property name="ascendingFields">
-                                    <map>
-                                        <entry key="age" value="java.lang.Integer"/>
-                                    </map>
+                                <property name="fields">
+                                    <util:map map-class="java.util.LinkedHashMap">
+                                        <entry key="age" value="int" />
+                                        <entry key="name" value="java.lang.String" />
+                                    </util:map>
                                 </property>
-                                <property name="queryFields">
-                                    <map>
-                                        <entry key="name" value="java.lang.String"/>
-                                        <entry key="age" value="java.lang.Integer"/>
-                                    </map>
-                                </property>
-                                <property name="textFields">
+                                <property name="indexes">
                                     <list>
-                                        <value>name</value>
+                                        <bean class="org.apache.ignite.cache.QueryIndex">
+                                            <property name="fields">
+                                                <map>
+                                                    <entry key="name" value="true"/>
+                                                </map>
+                                            </property>
+                                            <property name="indexType" value="FULLTEXT"/>
+                                        </bean>
+                                        <bean class="org.apache.ignite.cache.QueryIndex">
+                                            <property name="fields">
+                                                <map>
+                                                    <entry key="age" value="true"/>
+                                                </map>
+                                            </property>
+                                            <property name="indexType" value="SORTED"/>
+                                        </bean>
                                     </list>
                                 </property>
                             </bean>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
index ac3e553..898f12a 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
@@ -22,6 +22,7 @@
     using System;
     using System.Collections;
     using System.Collections.Generic;
+    using System.Configuration;
     using System.Globalization;
     using System.IO;
     using System.Linq;
@@ -116,9 +117,8 @@
                             <logger type='Apache.Ignite.Core.Tests.IgniteConfigurationSerializerTest+TestLogger, Apache.Ignite.Core.Tests' />
                             <swapSpaceSpi type='FileSwapSpaceSpi' baseDirectory='abcd' maximumSparsity='0.7' maximumWriteQueueSize='25' readStripesNumber='36' writeBufferSize='47' />
                         </igniteConfig>";
-            var reader = XmlReader.Create(new StringReader(xml));
 
-            var cfg = IgniteConfigurationXmlSerializer.Deserialize(reader);
+            var cfg = IgniteConfiguration.FromXml(xml);
 
             Assert.AreEqual("c:", cfg.WorkDirectory);
             Assert.AreEqual("127.1.1.1", cfg.Localhost);
@@ -294,6 +294,132 @@
         }
 
         /// <summary>
+        /// Tests the XML conversion.
+        /// </summary>
+        [Test]
+        public void TestToXml()
+        {
+            // Empty config
+            Assert.AreEqual("<?xml version=\"1.0\" encoding=\"utf-16\"?>\r\n<igniteConfiguration " +
+                            "xmlns=\"http://ignite.apache.org/schema/dotnet/IgniteConfigurationSection\" />",
+                new IgniteConfiguration().ToXml());
+
+            // Some properties
+            var cfg = new IgniteConfiguration
+            {
+                GridName = "myGrid",
+                ClientMode = true,
+                CacheConfiguration = new[]
+                {
+                    new CacheConfiguration("myCache")
+                    {
+                        CacheMode = CacheMode.Replicated,
+                        QueryEntities = new[]
+                        {
+                            new QueryEntity(typeof(int)),
+                            new QueryEntity(typeof(int), typeof(string))
+                        }
+                    }
+                },
+                IncludedEventTypes = new[]
+                {
+                    EventType.CacheEntryCreated,
+                    EventType.CacheNodesLeft
+                }
+            };
+
+            Assert.AreEqual(FixLineEndings(@"<?xml version=""1.0"" encoding=""utf-16""?>
+<igniteConfiguration gridName=""myGrid"" clientMode=""true"" xmlns=""http://ignite.apache.org/schema/dotnet/IgniteConfigurationSection"">
+  <cacheConfiguration>
+    <cacheConfiguration name=""myCache"" cacheMode=""Replicated"">
+      <queryEntities>
+        <queryEntity valueTypeName=""java.lang.Integer"" valueType=""System.Int32"" />
+        <queryEntity keyTypeName=""java.lang.Integer"" keyType=""System.Int32"" valueTypeName=""java.lang.String"" valueType=""System.String"" />
+      </queryEntities>
+    </cacheConfiguration>
+  </cacheConfiguration>
+  <includedEventTypes>
+    <int>CacheEntryCreated</int>
+    <int>CacheNodesLeft</int>
+  </includedEventTypes>
+</igniteConfiguration>"), cfg.ToXml());
+
+            // Custom section name and indent
+            var sb = new StringBuilder();
+
+            var settings = new XmlWriterSettings
+            {
+                Indent = true,
+                IndentChars = " "
+            };
+
+            using (var xmlWriter = XmlWriter.Create(sb, settings))
+            {
+                cfg.ToXml(xmlWriter, "igCfg");
+            }
+
+            Assert.AreEqual(FixLineEndings(@"<?xml version=""1.0"" encoding=""utf-16""?>
+<igCfg gridName=""myGrid"" clientMode=""true"" xmlns=""http://ignite.apache.org/schema/dotnet/IgniteConfigurationSection"">
+ <cacheConfiguration>
+  <cacheConfiguration name=""myCache"" cacheMode=""Replicated"">
+   <queryEntities>
+    <queryEntity valueTypeName=""java.lang.Integer"" valueType=""System.Int32"" />
+    <queryEntity keyTypeName=""java.lang.Integer"" keyType=""System.Int32"" valueTypeName=""java.lang.String"" valueType=""System.String"" />
+   </queryEntities>
+  </cacheConfiguration>
+ </cacheConfiguration>
+ <includedEventTypes>
+  <int>CacheEntryCreated</int>
+  <int>CacheNodesLeft</int>
+ </includedEventTypes>
+</igCfg>"), sb.ToString());
+        }
+
+        /// <summary>
+        /// Tests the deserialization.
+        /// </summary>
+        [Test]
+        public void TestFromXml()
+        {
+            // Empty section.
+            var cfg = IgniteConfiguration.FromXml("<x />");
+            AssertReflectionEqual(new IgniteConfiguration(), cfg);
+
+            // Empty section with XML header.
+            cfg = IgniteConfiguration.FromXml("<?xml version=\"1.0\" encoding=\"utf-16\"?><x />");
+            AssertReflectionEqual(new IgniteConfiguration(), cfg);
+
+            // Simple test.
+            cfg = IgniteConfiguration.FromXml(@"<igCfg gridName=""myGrid"" clientMode=""true"" />");
+            AssertReflectionEqual(new IgniteConfiguration {GridName = "myGrid", ClientMode = true}, cfg);
+
+            // Invalid xml.
+            var ex = Assert.Throws<ConfigurationErrorsException>(() =>
+                IgniteConfiguration.FromXml(@"<igCfg foo=""bar"" />"));
+
+            Assert.AreEqual("Invalid IgniteConfiguration attribute 'foo=bar', there is no such property " +
+                            "on 'Apache.Ignite.Core.IgniteConfiguration'", ex.Message);
+
+            // Xml reader.
+            using (var xmlReader = XmlReader.Create(
+                new StringReader(@"<igCfg gridName=""myGrid"" clientMode=""true"" />")))
+            {
+                cfg = IgniteConfiguration.FromXml(xmlReader);
+            }
+            AssertReflectionEqual(new IgniteConfiguration { GridName = "myGrid", ClientMode = true }, cfg);
+        }
+
+        /// <summary>
+        /// Ensures windows-style \r\n line endings in a string literal.
+        /// Git settings may cause string literals in both styles.
+        /// </summary>
+        private static string FixLineEndings(string s)
+        {
+            return s.Split('\n').Select(x => x.TrimEnd('\r'))
+                .Aggregate((acc, x) => string.Format("{0}\r\n{1}", acc, x));
+        }
+
+        /// <summary>
         /// Checks the schema validation.
         /// </summary>
         private static void CheckSchemaValidation()
@@ -340,20 +466,9 @@
         /// </summary>
         private static IgniteConfiguration SerializeDeserialize(IgniteConfiguration cfg)
         {
-            var sb = new StringBuilder();
+            var xml = cfg.ToXml();
 
-            using (var xmlWriter = XmlWriter.Create(sb))
-            {
-                IgniteConfigurationXmlSerializer.Serialize(cfg, xmlWriter, "igniteConfig");
-            }
-
-            var xml = sb.ToString();
-
-            using (var xmlReader = XmlReader.Create(new StringReader(xml)))
-            {
-                xmlReader.MoveToContent();
-                return IgniteConfigurationXmlSerializer.Deserialize(xmlReader);
-            }
+            return IgniteConfiguration.FromXml(xml);
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Log/CustomLoggerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Log/CustomLoggerTest.cs
index 73134fe..13e7854 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Log/CustomLoggerTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Log/CustomLoggerTest.cs
@@ -45,7 +45,7 @@
         [SetUp]
         public void TestSetUp()
         {
-            TestLogger.Entries.Clear();
+            TestLogger.Clear();
         }
 
         /// <summary>
@@ -363,7 +363,7 @@
         /// </summary>
         private class TestLogger : ILogger
         {
-            public static readonly List<LogEntry> Entries = new List<LogEntry>(5000);
+            private static readonly List<LogEntry> Logs = new List<LogEntry>(5000);
 
             private readonly LogLevel _minLevel;
 
@@ -372,15 +372,26 @@
                 _minLevel = minLevel;
             }
 
+            public static List<LogEntry> Entries
+            {
+                get
+                {
+                    lock (Logs)
+                    {
+                        return Logs.ToList();
+                    }
+                }
+            }
+
             public void Log(LogLevel level, string message, object[] args, IFormatProvider formatProvider, 
                 string category, string nativeErrorInfo, Exception ex)
             {
                 if (!IsEnabled(level))
                     return;
 
-                lock (Entries)
+                lock (Logs)
                 {
-                    Entries.Add(new LogEntry
+                    Logs.Add(new LogEntry
                     {
                         Level = level,
                         Message = message,
@@ -401,6 +412,14 @@
             [InstanceResource]
             // ReSharper disable once UnusedAutoPropertyAccessor.Local
             public IIgnite Ignite { get; set; }
+
+            public static void Clear()
+            {
+                lock (Logs)
+                {
+                    Logs.Clear();
+                }
+            }
         }
 
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ProjectFilesTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ProjectFilesTest.cs
index b95fead..806e1de 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ProjectFilesTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ProjectFilesTest.cs
@@ -118,15 +118,15 @@
         }
 
         /// <summary>
-        /// Tests that there are no Cyrillic C instead of English C (which are on the same keyboard key).
+        /// Tests that there are no chars outside the extended ASCII (Latin-1) range.
         /// </summary>
         [Test]
-        public void TestCyrillicChars()
+        public void TestAsciiChars()
         {
             var srcFiles = GetDotNetSourceDir().GetFiles("*.cs", SearchOption.AllDirectories)
-                  .Where(x => x.Name != "BinaryStringTest.cs" && x.Name != "BinarySelfTest.cs");
+                .Where(x => x.Name != "BinaryStringTest.cs" && x.Name != "BinarySelfTest.cs");
 
-            CheckFiles(srcFiles, x => x.Contains('\u0441') || x.Contains('\u0421'), "Files with Cyrillic 'C': ");
+            CheckFiles(srcFiles, x => x.Any(ch => ch > 255), "Files with non-ASCII chars: ");
         }
 
         /// <summary>
@@ -137,7 +137,7 @@
             var invalidFiles = files.Where(x => isInvalid(File.ReadAllText(x.FullName))).ToArray();
 
             Assert.AreEqual(0, invalidFiles.Length,
-                errorText + string.Join(", ", invalidFiles.Select(x => x.FullName)));
+                errorText + string.Join("\n ", invalidFiles.Select(x => x.FullName)));
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Properties/AssemblyInfo.cs
index 9eb2e24..817634e 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Properties/AssemblyInfo.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 using System.Reflection;
 using System.Runtime.InteropServices;
@@ -23,7 +23,7 @@
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
@@ -33,4 +33,4 @@
 
 [assembly: AssemblyVersion("1.8.0.14218")]
 [assembly: AssemblyFileVersion("1.8.0.14218")]
-[assembly: AssemblyInformationalVersion("1.8.0")]
+[assembly: AssemblyInformationalVersion("1.8.0")]
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TestRunner.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TestRunner.cs
index facc598..d67d24b 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TestRunner.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TestRunner.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 namespace Apache.Ignite.Core.Tests
 {
@@ -21,9 +21,6 @@
     using System.Diagnostics;
     using System.Linq;
     using System.Reflection;
-    using Apache.Ignite.Core.Tests.Binary;
-    using Apache.Ignite.Core.Tests.Cache.Affinity;
-    using Apache.Ignite.Core.Tests.Cache.Query;
     using Apache.Ignite.Core.Tests.Memory;
     using NUnit.ConsoleRunner;
 
@@ -100,4 +97,4 @@
         }
 
     }
-}
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TestUtils.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TestUtils.cs
index 88a2b52..5a9c824 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TestUtils.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/TestUtils.cs
@@ -22,6 +22,7 @@
     using System.Collections.Concurrent;
     using System.Collections.Generic;
     using System.Diagnostics;
+    using System.Diagnostics.CodeAnalysis;
     using System.Linq;
     using System.Threading;
     using Apache.Ignite.Core.Discovery.Tcp;
@@ -53,7 +54,8 @@
                 "-XX:+HeapDumpOnOutOfMemoryError",
                 "-Xms1g",
                 "-Xmx4g",
-                "-ea"
+                "-ea",
+                "-DIGNITE_QUIET=true"
             }
             : new List<string>
             {
@@ -61,7 +63,8 @@
                 "-Xms512m",
                 "-Xmx512m",
                 "-ea",
-                "-DIGNITE_ATOMIC_CACHE_DELETE_HISTORY_SIZE=1000"
+                "-DIGNITE_ATOMIC_CACHE_DELETE_HISTORY_SIZE=1000",
+                "-DIGNITE_QUIET=true"
             };
 
         /** */
@@ -344,6 +347,7 @@
         /// <summary>
         /// Runs the test in new process.
         /// </summary>
+        [SuppressMessage("ReSharper", "AssignNullToNotNullAttribute")]
         public static void RunTestInNewProcess(string fixtureName, string testName)
         {
             var procStart = new ProcessStartInfo
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/packages.config b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/packages.config
index d369a35..ccf079c 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/packages.config
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/packages.config
@@ -21,4 +21,4 @@
   <package id="NUnit.Runners" version="2.6.3" targetFramework="net40" />
   <package id="log4net" version="2.0.5" targetFramework="net40" />
   <package id="NLog" version="4.3.7" targetFramework="net40" />
-</packages>
\ No newline at end of file
+</packages>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.Schema.nuspec b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.Schema.nuspec
index 367bdd5..e57e371 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.Schema.nuspec
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.Schema.nuspec
@@ -36,7 +36,6 @@
         <projectUrl>https://ignite.apache.org/</projectUrl>
         <iconUrl>https://ignite.apache.org/images/logo_ignite_32_32.png</iconUrl>
         <requireLicenseAcceptance>false</requireLicenseAcceptance>
-        <summary>Enables Intellisense(TM) when editing IgniteConfigurationSection in app.config and web.config.</summary>
         <description>
 XSD file describes the structure of IgniteConfigurationSection and enables Intellisense(TM) when editing IgniteConfigurationSection in app.config and web.config in Visual Studio.
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.nuspec b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.nuspec
index bb6227e..8621103 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.nuspec
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.nuspec
@@ -44,9 +44,6 @@
             
 More info: https://apacheignite-net.readme.io/
         </description>
-        <summary>
-            High-performance in-memory platform for computing and transacting on large-scale data sets in real-time.
-        </summary>
         <releaseNotes></releaseNotes>
         <copyright>Copyright 2016</copyright>
         <tags>Apache Ignite In-Memory Distributed Computing SQL NoSQL Grid Map Reduce Cache linqpad-samples</tags>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/AffinityFunctionContext.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/AffinityFunctionContext.cs
index 6067af4..6f356f9 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/AffinityFunctionContext.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/AffinityFunctionContext.cs
@@ -19,10 +19,10 @@
 {
     using System.Collections.Generic;
     using System.Diagnostics;
-    using Apache.Ignite.Core.Binary;
     using Apache.Ignite.Core.Cluster;
     using Apache.Ignite.Core.Events;
     using Apache.Ignite.Core.Impl;
+    using Apache.Ignite.Core.Impl.Binary;
 
     /// <summary>
     /// Affinity function context.
@@ -48,7 +48,7 @@
         /// Initializes a new instance of the <see cref="AffinityFunctionContext"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        internal AffinityFunctionContext(IBinaryRawReader reader)
+        internal AffinityFunctionContext(BinaryReader reader)
         {
             Debug.Assert(reader != null);
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/ICacheLock.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/ICacheLock.cs
index a930961..4edfb53 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/ICacheLock.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/ICacheLock.cs
@@ -43,7 +43,7 @@
         /// </summary>
         /// <param name="timeout">
         /// A <see cref="TimeSpan" /> representing the amount of time to wait for the lock. 
-        /// A value of –1 millisecond specifies an infinite wait.
+        /// A value of -1 millisecond specifies an infinite wait.
         /// </param>
         /// <returns>True if the current thread acquires the lock; otherwise, false.</returns>
         bool TryEnter(TimeSpan timeout);
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/ICacheMetrics.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/ICacheMetrics.cs
index 3405625..3c01587 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/ICacheMetrics.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/ICacheMetrics.cs
@@ -92,7 +92,7 @@
         /// The mean time to execute gets.
         /// </summary>
         /// <returns>
-        /// The time in �s.
+        /// The time in ms.
         /// </returns>
         float AverageGetTime { get; }
 
@@ -100,7 +100,7 @@
         /// The mean time to execute puts.
         /// </summary>
         /// <returns>
-        /// The time in �s.
+        /// The time in ms.
         /// </returns>
         float AveragePutTime { get; }
 
@@ -108,7 +108,7 @@
         /// The mean time to execute removes.
         /// </summary>
         /// <returns>
-        /// The time in �s.
+        /// The time in ms.
         /// </returns>
         float AverageRemoveTime { get; }
 
@@ -116,7 +116,7 @@
         /// The mean time to execute tx commit.
         /// </summary>
         /// <returns>
-        /// The time in �s.
+        /// The time in ms.
         /// </returns>
         float AverageTxCommitTime { get; }
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs
index ed9d0eb..1d896b8 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Query/SqlFieldsQuery.cs
@@ -18,6 +18,7 @@
 namespace Apache.Ignite.Core.Cache.Query
 {
     using System.Diagnostics.CodeAnalysis;
+    using System.Linq;
 
     /// <summary>
     /// SQL fields query.
@@ -102,5 +103,20 @@
         ///   <c>true</c> if join order should be enforced; otherwise, <c>false</c>.
         /// </value>
         public bool EnforceJoinOrder { get; set; }
+
+        /// <summary>
+        /// Returns a <see cref="string" /> that represents this instance.
+        /// </summary>
+        /// <returns>
+        /// A <see cref="string" /> that represents this instance.
+        /// </returns>
+        public override string ToString()
+        {
+            var args = string.Join(", ", Arguments.Select(x => x == null ? "null" : x.ToString()));
+
+            return string.Format("SqlFieldsQuery [Sql={0}, Arguments=[{1}], Local={2}, PageSize={3}, " +
+                                 "EnableDistributedJoins={4}, EnforceJoinOrder={5}]", Sql, args, Local,
+                                 PageSize, EnableDistributedJoins, EnforceJoinOrder);
+        }
     }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Events/DiscoveryEvent.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Events/DiscoveryEvent.cs
index 5c163a2..011febf 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Events/DiscoveryEvent.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Events/DiscoveryEvent.cs
@@ -20,9 +20,9 @@
     using System.Collections.Generic;
     using System.Collections.ObjectModel;
     using System.Globalization;
-    using Apache.Ignite.Core.Binary;
     using Apache.Ignite.Core.Cluster;
     using Apache.Ignite.Core.Impl;
+    using Apache.Ignite.Core.Impl.Binary;
 
     /// <summary>
     /// Grid discovery event.
@@ -42,7 +42,7 @@
         /// Constructor.
         /// </summary>
         /// <param name="r">The reader to read data from.</param>
-        internal DiscoveryEvent(IBinaryRawReader r) : base(r)
+        internal DiscoveryEvent(BinaryReader r) : base(r)
         {
             _eventNode = ReadNode(r);
             _topologyVersion = r.ReadLong();
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Events/EventReader.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Events/EventReader.cs
index ee1c837..269e0bb 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Events/EventReader.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Events/EventReader.cs
@@ -18,7 +18,7 @@
 namespace Apache.Ignite.Core.Events
 {
     using System;
-    using Apache.Ignite.Core.Binary;
+    using Apache.Ignite.Core.Impl.Binary;
 
     /// <summary>
     /// Event reader.
@@ -32,7 +32,7 @@
         /// <param name="reader">Reader.</param>
         /// <returns>Deserialized event.</returns>
         /// <exception cref="System.InvalidCastException">Incompatible event type.</exception>
-        public static T Read<T>(IBinaryRawReader reader) where T : IEvent
+        public static T Read<T>(BinaryReader reader) where T : IEvent
         {
             var clsId = reader.ReadInt();
 
@@ -49,7 +49,7 @@
         /// <param name="reader">Reader.</param>
         /// <returns>Created and deserialized instance.</returns>
         /// <exception cref="System.InvalidOperationException">Invalid event class id:  + clsId</exception>
-        private static IEvent CreateInstance(int clsId, IBinaryRawReader reader)
+        private static IEvent CreateInstance(int clsId, BinaryReader reader)
         {
             switch (clsId)
             {
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs
index e7aa64e..16d11e8 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs
@@ -771,7 +771,8 @@
         {
             IgniteArgumentCheck.NotNullOrEmpty(xml, "xml");
 
-            using (var xmlReader = XmlReader.Create(new StringReader(xml)))
+            using (var stringReader = new StringReader(xml))
+            using (var xmlReader = XmlReader.Create(stringReader))
             {
                 // Skip XML header.
                 xmlReader.MoveToContent();
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryObjectHeader.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryObjectHeader.cs
index bb5c207..0e5ad2a 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryObjectHeader.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryObjectHeader.cs
@@ -265,7 +265,7 @@
 
                 Debug.Assert(hdr.Version == BinaryUtils.ProtoVer);
                 Debug.Assert(hdr.SchemaOffset <= hdr.Length);
-                Debug.Assert(hdr.SchemaOffset >= Size);
+                Debug.Assert(hdr.SchemaOffset >= Size || !hdr.HasSchema);
 
             }
             else
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/DateTimeHolder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/DateTimeHolder.cs
index 473f6c4..b80348e 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/DateTimeHolder.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/DateTimeHolder.cs
@@ -42,11 +42,11 @@
         /// Constructor.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public DateTimeHolder(IBinaryReader reader)
+        public DateTimeHolder(IBinaryRawReader reader)
         {
             Debug.Assert(reader != null);
 
-            _item = DateTime.FromBinary(reader.GetRawReader().ReadLong());
+            _item = DateTime.FromBinary(reader.ReadLong());
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/JavaTypes.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/JavaTypes.cs
index 109d55f..d1395d2 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/JavaTypes.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/JavaTypes.cs
@@ -45,8 +45,7 @@
             {typeof (string), "java.lang.String"},
             {typeof (decimal), "java.math.BigDecimal"},
             {typeof (Guid), "java.util.UUID"},
-            {typeof (DateTime), "java.sql.Timestamp"},
-            {typeof (DateTime?), "java.sql.Timestamp"},
+            {typeof (DateTime), "java.sql.Timestamp"}
         };
 
         /** */
@@ -62,6 +61,19 @@
         private static readonly Dictionary<string, Type> JavaToNet =
             NetToJava.GroupBy(x => x.Value).ToDictionary(g => g.Key, g => g.First().Key);
 
+        /** */
+        private static readonly Dictionary<string, string> JavaPrimitiveToType = new Dictionary<string, string>
+        {
+            {"boolean", "java.lang.Boolean"},
+            {"byte", "java.lang.Byte"},
+            {"short", "java.lang.Short"},
+            {"char", "java.lang.Character"},
+            {"int", "java.lang.Integer"},
+            {"long", "java.lang.Long"},
+            {"float", "java.lang.Float"},
+            {"double", "java.lang.Double"},
+        };
+
         /// <summary>
         /// Gets the corresponding Java type name.
         /// </summary>
@@ -70,6 +82,9 @@
             if (type == null)
                 return null;
 
+            // Unwrap nullable.
+            type = Nullable.GetUnderlyingType(type) ?? type;
+
             string res;
 
             return NetToJava.TryGetValue(type, out res) ? res : null;
@@ -83,8 +98,9 @@
             if (type == null)
                 return;
 
-            Type directType;
-            if (!IndirectMappingTypes.TryGetValue(type, out directType))
+            var directType = GetDirectlyMappedType(type);
+
+            if (directType == type)
                 return;
 
             log.Warn("{0}: Type '{1}' maps to Java type '{2}' using unchecked conversion. " +
@@ -94,6 +110,19 @@
         }
 
         /// <summary>
+        /// Gets the compatible type that maps directly to Java.
+        /// </summary>
+        public static Type GetDirectlyMappedType(Type type)
+        {
+            // Unwrap nullable.
+            var unwrapType = Nullable.GetUnderlyingType(type) ?? type;
+
+            Type directType;
+
+            return IndirectMappingTypes.TryGetValue(unwrapType, out directType) ? directType : type;
+        }
+
+        /// <summary>
         /// Gets .NET type that corresponds to specified Java type name.
         /// </summary>
         /// <param name="javaTypeName">Name of the java type.</param>
@@ -103,9 +132,13 @@
             if (string.IsNullOrEmpty(javaTypeName))
                 return null;
 
+            string fullJavaTypeName;
+
+            JavaPrimitiveToType.TryGetValue(javaTypeName, out fullJavaTypeName);
+
             Type res;
 
-            return JavaToNet.TryGetValue(javaTypeName, out res) ? res : null;
+            return JavaToNet.TryGetValue(fullJavaTypeName ?? javaTypeName, out res) ? res : null;
         }
     }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Marshaller.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Marshaller.cs
index 7acdfaa..475762a 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Marshaller.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Marshaller.cs
@@ -579,26 +579,26 @@
         /// </summary>
         private void AddSystemTypes()
         {
-            AddSystemType(BinaryUtils.TypeNativeJobHolder, w => new ComputeJobHolder(w));
-            AddSystemType(BinaryUtils.TypeComputeJobWrapper, w => new ComputeJobWrapper(w));
-            AddSystemType(BinaryUtils.TypeIgniteProxy, w => new IgniteProxy());
-            AddSystemType(BinaryUtils.TypeComputeOutFuncJob, w => new ComputeOutFuncJob(w));
-            AddSystemType(BinaryUtils.TypeComputeOutFuncWrapper, w => new ComputeOutFuncWrapper(w));
-            AddSystemType(BinaryUtils.TypeComputeFuncWrapper, w => new ComputeFuncWrapper(w));
-            AddSystemType(BinaryUtils.TypeComputeFuncJob, w => new ComputeFuncJob(w));
-            AddSystemType(BinaryUtils.TypeComputeActionJob, w => new ComputeActionJob(w));
-            AddSystemType(BinaryUtils.TypeContinuousQueryRemoteFilterHolder, w => new ContinuousQueryFilterHolder(w));
-            AddSystemType(BinaryUtils.TypeSerializableHolder, w => new SerializableObjectHolder(w),
+            AddSystemType(BinaryUtils.TypeNativeJobHolder, r => new ComputeJobHolder(r));
+            AddSystemType(BinaryUtils.TypeComputeJobWrapper, r => new ComputeJobWrapper(r));
+            AddSystemType(BinaryUtils.TypeIgniteProxy, r => new IgniteProxy());
+            AddSystemType(BinaryUtils.TypeComputeOutFuncJob, r => new ComputeOutFuncJob(r));
+            AddSystemType(BinaryUtils.TypeComputeOutFuncWrapper, r => new ComputeOutFuncWrapper(r));
+            AddSystemType(BinaryUtils.TypeComputeFuncWrapper, r => new ComputeFuncWrapper(r));
+            AddSystemType(BinaryUtils.TypeComputeFuncJob, r => new ComputeFuncJob(r));
+            AddSystemType(BinaryUtils.TypeComputeActionJob, r => new ComputeActionJob(r));
+            AddSystemType(BinaryUtils.TypeContinuousQueryRemoteFilterHolder, r => new ContinuousQueryFilterHolder(r));
+            AddSystemType(BinaryUtils.TypeSerializableHolder, r => new SerializableObjectHolder(r),
                 serializer: new SerializableSerializer());
-            AddSystemType(BinaryUtils.TypeDateTimeHolder, w => new DateTimeHolder(w),
+            AddSystemType(BinaryUtils.TypeDateTimeHolder, r => new DateTimeHolder(r),
                 serializer: new DateTimeSerializer());
-            AddSystemType(BinaryUtils.TypeCacheEntryProcessorHolder, w => new CacheEntryProcessorHolder(w));
-            AddSystemType(BinaryUtils.TypeCacheEntryPredicateHolder, w => new CacheEntryFilterHolder(w));
-            AddSystemType(BinaryUtils.TypeMessageListenerHolder, w => new MessageListenerHolder(w));
-            AddSystemType(BinaryUtils.TypeStreamReceiverHolder, w => new StreamReceiverHolder(w));
-            AddSystemType(0, w => new AffinityKey(w), "affKey");
-            AddSystemType(BinaryUtils.TypePlatformJavaObjectFactoryProxy, w => new PlatformJavaObjectFactoryProxy());
-            AddSystemType(0, w => new ObjectInfoHolder(w));
+            AddSystemType(BinaryUtils.TypeCacheEntryProcessorHolder, r => new CacheEntryProcessorHolder(r));
+            AddSystemType(BinaryUtils.TypeCacheEntryPredicateHolder, r => new CacheEntryFilterHolder(r));
+            AddSystemType(BinaryUtils.TypeMessageListenerHolder, r => new MessageListenerHolder(r));
+            AddSystemType(BinaryUtils.TypeStreamReceiverHolder, r => new StreamReceiverHolder(r));
+            AddSystemType(0, r => new AffinityKey(r), "affKey");
+            AddSystemType(BinaryUtils.TypePlatformJavaObjectFactoryProxy, r => new PlatformJavaObjectFactoryProxy());
+            AddSystemType(0, r => new ObjectInfoHolder(r));
         }
     }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/SerializableObjectHolder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/SerializableObjectHolder.cs
index 08b44df..99c8f49 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/SerializableObjectHolder.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/SerializableObjectHolder.cs
@@ -67,13 +67,11 @@
         /// Initializes a new instance of the <see cref="SerializableObjectHolder"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public SerializableObjectHolder(IBinaryReader reader)
+        public SerializableObjectHolder(BinaryReader reader)
         {
             Debug.Assert(reader != null);
 
-            var reader0 = (BinaryReader) reader.GetRawReader();
-
-            using (var streamAdapter = new BinaryStreamAdapter(reader0.Stream))
+            using (var streamAdapter = new BinaryStreamAdapter(reader.Stream))
             {
                 _item = new BinaryFormatter().Deserialize(streamAdapter, null);
             }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Affinity/AffinityFunctionSerializer.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Affinity/AffinityFunctionSerializer.cs
index 888445a..5d940c5 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Affinity/AffinityFunctionSerializer.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Affinity/AffinityFunctionSerializer.cs
@@ -209,7 +209,7 @@
             Debug.Assert(stream != null);
             Debug.Assert(marsh != null);
 
-            IBinaryRawReader reader = marsh.StartUnmarshal(stream);
+            var reader = marsh.StartUnmarshal(stream);
 
             var partCnt = reader.ReadInt();
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheEntryFilterHolder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheEntryFilterHolder.cs
index 4487c59..90db02c 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheEntryFilterHolder.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheEntryFilterHolder.cs
@@ -88,15 +88,13 @@
         /// Initializes a new instance of the <see cref="CacheEntryFilterHolder"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public CacheEntryFilterHolder(IBinaryReader reader)
+        public CacheEntryFilterHolder(BinaryReader reader)
         {
-            var reader0 = (BinaryReader)reader.GetRawReader();
+            _pred = reader.ReadObject<object>();
 
-            _pred = reader0.ReadObject<object>();
+            _keepBinary = reader.ReadBoolean();
 
-            _keepBinary = reader0.ReadBoolean();
-
-            _marsh = reader0.Marshaller;
+            _marsh = reader.Marshaller;
 
             _invoker = GetInvoker(_pred);
         }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheEntryProcessorHolder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheEntryProcessorHolder.cs
index a0f8f3a..9fc7c7d 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheEntryProcessorHolder.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheEntryProcessorHolder.cs
@@ -113,12 +113,10 @@
         /// Initializes a new instance of the <see cref="CacheEntryProcessorHolder"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public CacheEntryProcessorHolder(IBinaryReader reader)
+        public CacheEntryProcessorHolder(IBinaryRawReader reader)
         {
-            var reader0 = (BinaryReader) reader.GetRawReader();
-
-            _proc = reader0.ReadObject<object>();
-            _arg = reader0.ReadObject<object>();
+            _proc = reader.ReadObject<object>();
+            _arg = reader.ReadObject<object>();
 
             _processFunc = GetProcessFunc(_proc);
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryFilterHolder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryFilterHolder.cs
index c2e7762..6eebbbe 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryFilterHolder.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Query/Continuous/ContinuousQueryFilterHolder.cs
@@ -75,12 +75,10 @@
         /// Initializes a new instance of the <see cref="ContinuousQueryFilterHolder"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public ContinuousQueryFilterHolder(IBinaryReader reader)
+        public ContinuousQueryFilterHolder(IBinaryRawReader reader)
         {
-            var rawReader = (BinaryReader) reader.GetRawReader();
-
-            _filter = rawReader.ReadObject<object>();
-            _keepBinary = rawReader.ReadBoolean();
+            _filter = reader.ReadObject<object>();
+            _keepBinary = reader.ReadBoolean();
         }
     }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/Classpath.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/Classpath.cs
index be68074..6c4040b 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/Classpath.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/Classpath.cs
@@ -20,6 +20,7 @@
     using System;
     using System.Text;
     using System.IO;
+    using Apache.Ignite.Core.Log;
 
     /// <summary>
     /// Classpath resolver.
@@ -37,11 +38,13 @@
         /// </summary>
         /// <param name="cfg">The configuration.</param>
         /// <param name="forceTestClasspath">Append test directories even if
-        ///     <see cref="EnvIgniteNativeTestClasspath" /> is not set.</param>
+        /// <see cref="EnvIgniteNativeTestClasspath" /> is not set.</param>
+        /// <param name="log">The log.</param>
         /// <returns>
         /// Classpath string.
         /// </returns>
-        internal static string CreateClasspath(IgniteConfiguration cfg = null, bool forceTestClasspath = false)
+        internal static string CreateClasspath(IgniteConfiguration cfg = null, bool forceTestClasspath = false, 
+            ILogger log = null)
         {
             var cpStr = new StringBuilder();
 
@@ -53,11 +56,14 @@
                     cpStr.Append(';');
             }
 
-            var ggHome = IgniteHome.Resolve(cfg);
+            var ggHome = IgniteHome.Resolve(cfg, log);
 
             if (!string.IsNullOrWhiteSpace(ggHome))
                 AppendHomeClasspath(ggHome, forceTestClasspath, cpStr);
 
+            if (log != null)
+                log.Debug("Classpath resolved to: " + cpStr);
+
             return ClasspathPrefix + cpStr;
         }
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/Fnv1Hash.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/Fnv1Hash.cs
index 231220a..3c2c871 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/Fnv1Hash.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/Fnv1Hash.cs
@@ -18,7 +18,7 @@
 namespace Apache.Ignite.Core.Impl.Common
 {
     /// <summary>
-    /// Fowler–Noll–Vo hash function.
+    /// Fowler-Noll-Vo hash function.
     /// </summary>
     internal static class Fnv1Hash
     {
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/IgniteHome.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/IgniteHome.cs
index 6485201..08f6d84 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/IgniteHome.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/IgniteHome.cs
@@ -22,6 +22,7 @@
     using System.IO;
     using System.Reflection;
     using Apache.Ignite.Core.Common;
+    using Apache.Ignite.Core.Log;
 
     /// <summary>
     /// IgniteHome resolver.
@@ -35,29 +36,40 @@
         /// Calculate Ignite home.
         /// </summary>
         /// <param name="cfg">Configuration.</param>
-        /// <returns></returns>
-        public static string Resolve(IgniteConfiguration cfg)
+        /// <param name="log">The log.</param>
+        public static string Resolve(IgniteConfiguration cfg, ILogger log = null)
         {
             var home = cfg == null ? null : cfg.IgniteHome;
 
             if (string.IsNullOrWhiteSpace(home))
+            {
                 home = Environment.GetEnvironmentVariable(EnvIgniteHome);
+
+                if (log != null)
+                    log.Debug("IgniteHome retrieved from {0} environment variable: '{1}'", EnvIgniteHome, home);
+            }
             else if (!IsIgniteHome(new DirectoryInfo(home)))
                 throw new IgniteException(string.Format("IgniteConfiguration.IgniteHome is not valid: '{0}'", home));
 
             if (string.IsNullOrWhiteSpace(home))
-                home = Resolve();
+                home = Resolve(log);
             else if (!IsIgniteHome(new DirectoryInfo(home)))
                 throw new IgniteException(string.Format("{0} is not valid: '{1}'", EnvIgniteHome, home));
 
+            if (log != null)
+                log.Debug("IgniteHome resolved to '{0}'", home);
+
             return home;
         }
 
         /// <summary>
         /// Automatically resolve Ignite home directory.
         /// </summary>
-        /// <returns>Ignite home directory.</returns>
-        private static string Resolve()
+        /// <param name="log">The log.</param>
+        /// <returns>
+        /// Ignite home directory.
+        /// </returns>
+        private static string Resolve(ILogger log)
         {
             var probeDirs = new[]
             {
@@ -65,8 +77,16 @@
                 Directory.GetCurrentDirectory()
             };
 
+            if (log != null)
+                log.Debug("Attempting to resolve IgniteHome in the assembly directory " +
+                          "'{0}' and current directory '{1}'...", probeDirs[0], probeDirs[1]);
+
+
             foreach (var probeDir in probeDirs.Where(x => !string.IsNullOrEmpty(x)))
             {
+                if (log != null)
+                    log.Debug("Probing IgniteHome in '{0}'...", probeDir);
+
                 var dir = new DirectoryInfo(probeDir);
 
                 while (dir != null)
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeActionJob.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeActionJob.cs
index d7c4311..55332d5 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeActionJob.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeActionJob.cs
@@ -72,11 +72,9 @@
         /// Initializes a new instance of the <see cref="ComputeActionJob"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public ComputeActionJob(IBinaryReader reader)
+        public ComputeActionJob(IBinaryRawReader reader)
         {
-            var reader0 = (BinaryReader)reader.GetRawReader();
-
-            _action = reader0.ReadObject<IComputeAction>();
+            _action = reader.ReadObject<IComputeAction>();
         }
     }
 }
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeFuncJob.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeFuncJob.cs
index 8f76fcf..0cd8df2 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeFuncJob.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeFuncJob.cs
@@ -75,12 +75,10 @@
         /// Initializes a new instance of the <see cref="ComputeFuncJob"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public ComputeFuncJob(IBinaryReader reader)
+        public ComputeFuncJob(IBinaryRawReader reader)
         {
-            var reader0 = (BinaryReader) reader.GetRawReader();
-
-            _clo = reader0.ReadObject<IComputeFunc>();
-            _arg = reader0.ReadObject<object>();
+            _clo = reader.ReadObject<IComputeFunc>();
+            _arg = reader.ReadObject<object>();
         }
     }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeOutFuncJob.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeOutFuncJob.cs
index c99d821..abdf448 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeOutFuncJob.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Closure/ComputeOutFuncJob.cs
@@ -65,11 +65,12 @@
             writer0.WithDetach(w => w.WriteObject(_clo));
         }
 
-        public ComputeOutFuncJob(IBinaryReader reader)
+        /// <summary>
+        /// Initializes a new instance of the <see cref="ComputeOutFuncJob" /> class.
+        /// </summary>
+        public ComputeOutFuncJob(IBinaryRawReader reader)
         {
-            var reader0 = (BinaryReader) reader.GetRawReader();
-
-            _clo = reader0.ReadObject<IComputeOutFunc>();
+            _clo = reader.ReadObject<IComputeOutFunc>();
         }
     }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeFunc.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeFunc.cs
index 1f5523c..62261c9 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeFunc.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeFunc.cs
@@ -85,11 +85,9 @@
         /// Initializes a new instance of the <see cref="ComputeFuncWrapper"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public ComputeFuncWrapper(IBinaryReader reader)
+        public ComputeFuncWrapper(IBinaryRawReader reader)
         {
-            var reader0 = (BinaryReader)reader.GetRawReader();
-
-            _func = reader0.ReadObject<object>();
+            _func = reader.ReadObject<object>();
 
             _invoker = DelegateTypeDescriptor.GetComputeFunc(_func.GetType());
         }
@@ -98,6 +96,7 @@
         /// Injects the Ignite instance.
         /// </summary>
         [InstanceResource]
+        // ReSharper disable once UnusedMember.Global (used by injector)
         public void InjectIgnite(IIgnite ignite)
         {
             // Propagate injection
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeJob.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeJob.cs
index 9fa1377..d2beb2c 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeJob.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeJob.cs
@@ -52,11 +52,9 @@
         /// Initializes a new instance of the <see cref="ComputeJobWrapper"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public ComputeJobWrapper(IBinaryReader reader)
+        public ComputeJobWrapper(IBinaryRawReader reader)
         {
-            var reader0 = (BinaryReader)reader.GetRawReader();
-
-            _job = reader0.ReadObject<object>();
+            _job = reader.ReadObject<object>();
 
             DelegateTypeDescriptor.GetComputeJob(_job.GetType(), out _execute, out _cancel);
         }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeJobHolder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeJobHolder.cs
index 0d93010..6389730 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeJobHolder.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeJobHolder.cs
@@ -47,15 +47,13 @@
         /// Default ctor for marshalling.
         /// </summary>
         /// <param name="reader"></param>
-        public ComputeJobHolder(IBinaryReader reader)
+        public ComputeJobHolder(BinaryReader reader)
         {
             Debug.Assert(reader != null);
 
-            var reader0 = (BinaryReader) reader.GetRawReader();
+            _ignite = reader.Marshaller.Ignite;
 
-            _ignite = reader0.Marshaller.Ignite;
-
-            _job = reader0.ReadObject<IComputeJob>();
+            _job = reader.ReadObject<IComputeJob>();
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeOutFunc.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeOutFunc.cs
index 974ada2..f973ae5 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeOutFunc.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeOutFunc.cs
@@ -89,11 +89,9 @@
         /// Initializes a new instance of the <see cref="ComputeOutFuncWrapper"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public ComputeOutFuncWrapper(IBinaryReader reader)
+        public ComputeOutFuncWrapper(IBinaryRawReader reader)
         {
-            var reader0 = (BinaryReader)reader.GetRawReader();
-
-            _func = reader0.ReadObject<object>();
+            _func = reader.ReadObject<object>();
 
             _invoker = DelegateTypeDescriptor.GetComputeOutFunc(_func.GetType());
         }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Datastream/StreamReceiverHolder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Datastream/StreamReceiverHolder.cs
index 90ade5a..953ddb6 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Datastream/StreamReceiverHolder.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Datastream/StreamReceiverHolder.cs
@@ -50,7 +50,7 @@
         /// Initializes a new instance of the <see cref="StreamReceiverHolder"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public StreamReceiverHolder(BinaryReader reader)
+        public StreamReceiverHolder(IBinaryRawReader reader)
         {
             var rcvType = reader.ReadByte();
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Handle/HandleRegistry.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Handle/HandleRegistry.cs
index 4e1135a..588d608 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Handle/HandleRegistry.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Handle/HandleRegistry.cs
@@ -102,6 +102,17 @@
         }
 
         /// <summary>
+        /// Allocate a handle for critical resource in safe mode.
+        /// </summary>
+        /// <param name="target">Target.</param>
+        /// <returns>Pointer.</returns>
+        [ExcludeFromCodeCoverage]
+        public long AllocateCriticalSafe(object target)
+        {
+            return Allocate0(target, true, true);
+        }
+
+        /// <summary>
         /// Internal allocation routine.
         /// </summary>
         /// <param name="target">Target.</param>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IgniteUtils.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IgniteUtils.cs
index de9daae..414452b 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IgniteUtils.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IgniteUtils.cs
@@ -27,7 +27,6 @@
     using System.Reflection;
     using System.Runtime.InteropServices;
     using System.Text;
-    using Apache.Ignite.Core.Binary;
     using Apache.Ignite.Core.Cluster;
     using Apache.Ignite.Core.Common;
     using Apache.Ignite.Core.Impl.Binary;
@@ -464,7 +463,7 @@
         /// <param name="reader">Reader.</param>
         /// <param name="pred">The predicate.</param>
         /// <returns> Nodes list or null. </returns>
-        public static List<IClusterNode> ReadNodes(IBinaryRawReader reader, Func<ClusterNodeImpl, bool> pred = null)
+        public static List<IClusterNode> ReadNodes(BinaryReader reader, Func<ClusterNodeImpl, bool> pred = null)
         {
             var cnt = reader.ReadInt();
 
@@ -473,7 +472,7 @@
 
             var res = new List<IClusterNode>(cnt);
 
-            var ignite = ((BinaryReader)reader).Marshaller.Ignite;
+            var ignite = reader.Marshaller.Ignite;
 
             if (pred == null)
             {
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Memory/PlatformBigEndianMemoryStream.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Memory/PlatformBigEndianMemoryStream.cs
index d59d572..70f9127 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Memory/PlatformBigEndianMemoryStream.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Memory/PlatformBigEndianMemoryStream.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 namespace Apache.Ignite.Core.Impl.Memory
 {
@@ -488,4 +488,4 @@
 
         #endregion
     }
-}
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Messaging/MessageListenerHolder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Messaging/MessageListenerHolder.cs
index 1a2c18a..8e794d1 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Messaging/MessageListenerHolder.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Messaging/MessageListenerHolder.cs
@@ -155,15 +155,13 @@
         /// Initializes a new instance of the <see cref="MessageListenerHolder"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        public MessageListenerHolder(IBinaryReader reader)
+        public MessageListenerHolder(BinaryReader reader)
         {
-            var reader0 = (BinaryReader)reader.GetRawReader();
-
-            _filter = reader0.ReadObject<object>();
+            _filter = reader.ReadObject<object>();
 
             _invoker = GetInvoker(_filter);
 
-            _ignite = reader0.Marshaller.Ignite;
+            _ignite = reader.Marshaller.Ignite;
 
             ResourceProcessor.Inject(_filter, _ignite);
         }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Unmanaged/UnmanagedCallbacks.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Unmanaged/UnmanagedCallbacks.cs
index 95a4633..cc205e8 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Unmanaged/UnmanagedCallbacks.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Unmanaged/UnmanagedCallbacks.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -21,8 +21,8 @@
     using System.Collections.Generic;
     using System.Diagnostics;
     using System.Diagnostics.CodeAnalysis;
-    using System.IO;
     using System.Globalization;
+    using System.IO;
     using System.Runtime.InteropServices;
     using System.Threading;
     using Apache.Ignite.Core.Cache.Affinity;
@@ -53,9 +53,9 @@
     /// Unmanaged callbacks.
     /// </summary>
     [SuppressMessage("ReSharper", "UnusedMember.Local")]
-    [SuppressMessage("Microsoft.Design", "CA1001:TypesThatOwnDisposableFieldsShouldBeDisposable", 
+    [SuppressMessage("Microsoft.Design", "CA1001:TypesThatOwnDisposableFieldsShouldBeDisposable",
         Justification = "This class instance usually lives as long as the app runs.")]
-    [SuppressMessage("Microsoft.Design", "CA1049:TypesThatOwnNativeResourcesShouldBeDisposable", 
+    [SuppressMessage("Microsoft.Design", "CA1049:TypesThatOwnNativeResourcesShouldBeDisposable",
         Justification = "This class instance usually lives as long as the app runs.")]
     internal unsafe class UnmanagedCallbacks
     {
@@ -71,7 +71,7 @@
 
         /** Handle registry. */
         private readonly HandleRegistry _handleRegistry = new HandleRegistry();
-        
+
         /** Grid. */
         private volatile Ignite _ignite;
 
@@ -155,7 +155,7 @@
         private delegate long MessagingFilterCreateCallbackDelegate(void* target, long memPtr);
         private delegate int MessagingFilterApplyCallbackDelegate(void* target, long ptr, long memPtr);
         private delegate void MessagingFilterDestroyCallbackDelegate(void* target, long ptr);
-        
+
         private delegate long EventFilterCreateCallbackDelegate(void* target, long memPtr);
         private delegate int EventFilterApplyCallbackDelegate(void* target, long ptr, long memPtr);
         private delegate void EventFilterDestroyCallbackDelegate(void* target, long ptr);
@@ -171,7 +171,7 @@
 
         private delegate void OnStartCallbackDelegate(void* target, void* proc, long memPtr);
         private delegate void OnStopCallbackDelegate(void* target);
-        
+
         private delegate void ErrorCallbackDelegate(void* target, int errType, sbyte* errClsChars, int errClsCharsLen, sbyte* errMsgChars, int errMsgCharsLen, sbyte* stackTraceChars, int stackTraceCharsLen, void* errData, int errDataLen);
 
         private delegate long ExtensionCallbackInLongOutLongDelegate(void* target, int typ, long arg1);
@@ -209,7 +209,7 @@
                 cacheStoreDestroy = CreateFunctionPointer((CacheStoreDestroyCallbackDelegate) CacheStoreDestroy),
 
                 cacheStoreSessionCreate = CreateFunctionPointer((CacheStoreSessionCreateCallbackDelegate) CacheStoreSessionCreate),
-                
+
                 cacheEntryFilterCreate = CreateFunctionPointer((CacheEntryFilterCreateCallbackDelegate)CacheEntryFilterCreate),
                 cacheEntryFilterApply = CreateFunctionPointer((CacheEntryFilterApplyCallbackDelegate)CacheEntryFilterApply),
                 cacheEntryFilterDestroy = CreateFunctionPointer((CacheEntryFilterDestroyCallbackDelegate)CacheEntryFilterDestroy),
@@ -238,7 +238,7 @@
                     CreateFunctionPointer((DataStreamerTopologyUpdateCallbackDelegate) DataStreamerTopologyUpdate),
                 dataStreamerStreamReceiverInvoke =
                     CreateFunctionPointer((DataStreamerStreamReceiverInvokeCallbackDelegate) DataStreamerStreamReceiverInvoke),
-                
+
                 futureByteResult = CreateFunctionPointer((FutureByteResultCallbackDelegate) FutureByteResult),
                 futureBoolResult = CreateFunctionPointer((FutureBoolResultCallbackDelegate) FutureBoolResult),
                 futureShortResult = CreateFunctionPointer((FutureShortResultCallbackDelegate) FutureShortResult),
@@ -253,7 +253,7 @@
                 lifecycleOnEvent = CreateFunctionPointer((LifecycleOnEventCallbackDelegate) LifecycleOnEvent),
                 memoryReallocate = CreateFunctionPointer((MemoryReallocateCallbackDelegate) MemoryReallocate),
                 nodeInfo = CreateFunctionPointer((NodeInfoCallbackDelegate) NodeInfo),
-                
+
                 messagingFilterCreate = CreateFunctionPointer((MessagingFilterCreateCallbackDelegate)MessagingFilterCreate),
                 messagingFilterApply = CreateFunctionPointer((MessagingFilterApplyCallbackDelegate)MessagingFilterApply),
                 messagingFilterDestroy = CreateFunctionPointer((MessagingFilterDestroyCallbackDelegate)MessagingFilterDestroy),
@@ -268,11 +268,11 @@
                 serviceInvokeMethod = CreateFunctionPointer((ServiceInvokeMethodCallbackDelegate)ServiceInvokeMethod),
 
                 clusterNodeFilterApply = CreateFunctionPointer((ClusterNodeFilterApplyCallbackDelegate)ClusterNodeFilterApply),
-                
+
                 onStart = CreateFunctionPointer((OnStartCallbackDelegate)OnStart),
                 onStop = CreateFunctionPointer((OnStopCallbackDelegate)OnStop),
                 error = CreateFunctionPointer((ErrorCallbackDelegate)Error),
-                
+
                 extensionCbInLongOutLong = CreateFunctionPointer((ExtensionCallbackInLongOutLongDelegate)ExtensionCallbackInLongOutLong),
                 extensionCbInLongLongOutLong = CreateFunctionPointer((ExtensionCallbackInLongLongOutLongDelegate)ExtensionCallbackInLongLongOutLong),
 
@@ -456,7 +456,7 @@
                 {
                     return task.JobResultLocal(Job(jobPtr));
                 }
-                
+
                 using (var stream = IgniteManager.Memory.Get(memPtr).GetStream())
                 {
                     return task.JobResultRemote(Job(jobPtr), stream);
@@ -600,7 +600,7 @@
                     var filterHolder = reader.ReadObject<ContinuousQueryFilterHolder>();
 
                     // 2. Create real filter from it's holder.
-                    var filter = (IContinuousQueryFilter) DelegateTypeDescriptor.GetContinuousQueryFilterCtor(
+                    var filter = (IContinuousQueryFilter)DelegateTypeDescriptor.GetContinuousQueryFilterCtor(
                         filterHolder.Filter.GetType())(filterHolder.Filter, filterHolder.KeepBinary);
 
                     // 3. Inject grid.
@@ -634,7 +634,7 @@
                 holder.Release();
             });
         }
-        
+
         #endregion
 
         #region IMPLEMENTATION: DATA STREAMER
@@ -680,7 +680,7 @@
         }
 
         #endregion
-        
+
         #region IMPLEMENTATION: FUTURES
 
         private void FutureByteResult(void* target, long futPtr, int res)
@@ -856,7 +856,7 @@
             return SafeCall(() =>
             {
                 var holder = _ignite.HandleRegistry.Get<MessageListenerHolder>(ptr, false);
-                
+
                 if (holder == null)
                     return 0;
 
@@ -874,7 +874,7 @@
                 _ignite.HandleRegistry.Release(ptr);
             });
         }
-        
+
         #endregion
 
         #region IMPLEMENTATION: EXTENSIONS
@@ -937,7 +937,7 @@
                 _ignite.HandleRegistry.Release(ptr);
             });
         }
-        
+
         #endregion
 
         #region IMPLEMENTATION: SERVICES
@@ -1093,7 +1093,7 @@
             if (ignite != null)
                 ignite.AfterNodeStop();
         }
-        
+
         private void Error(void* target, int errType, sbyte* errClsChars, int errClsCharsLen, sbyte* errMsgChars,
             int errMsgCharsLen, sbyte* stackTraceChars, int stackTraceCharsLen, void* errData, int errDataLen)
         {
@@ -1387,7 +1387,7 @@
         public void Cleanup()
         {
             _ignite = null;
-            
+
             _handleRegistry.Close();
         }
 
@@ -1399,4 +1399,4 @@
             get { return ConsoleWritePtr; }
         }
     }
-}
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/NuGet/LINQPad/BinaryModeExample.linq b/modules/platforms/dotnet/Apache.Ignite.Core/NuGet/LINQPad/BinaryModeExample.linq
new file mode 100644
index 0000000..b90402d
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/NuGet/LINQPad/BinaryModeExample.linq
@@ -0,0 +1,94 @@
+<Query Kind="Statements">
+  <NuGetReference>Apache.Ignite</NuGetReference>
+  <Namespace>Apache.Ignite.Core</Namespace>
+  <Namespace>Apache.Ignite.Core.Binary</Namespace>
+  <Namespace>Apache.Ignite.Core.Cache.Configuration</Namespace>
+  <Namespace>Apache.Ignite.Core.Cache.Query</Namespace>
+</Query>
+
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+/// <summary>
+/// This example works with cache entirely in binary mode: no classes are needed.
+/// 
+/// Requirements:
+/// * Java Runtime Environment (JRE): http://www.oracle.com/technetwork/java/javase/downloads/index.html (x86 for regular LINQPad, x64 for AnyCPU LINQPad)
+/// </summary>
+
+// Force new LINQPad query process to reinit JVM.
+Util.NewProcess = true;
+
+// Start instance.
+using (var ignite = Ignition.Start())
+{	
+    // Create new cache and configure queries for Person binary type.
+    var cache0 = ignite.GetOrCreateCache<object, object>(new CacheConfiguration
+    {
+        Name = "persons",
+        QueryEntities = new[]
+        {
+            new QueryEntity
+            {
+                KeyType = typeof(int),
+                ValueTypeName = "Person",
+                Fields = new[]
+                {
+                    new QueryField("Name", typeof(string)),
+                    new QueryField("Age", typeof(int))
+                }
+            },
+        }
+    });
+
+	// Switch to binary mode to work with data in serialized form.
+	var cache = cache0.WithKeepBinary<int, IBinaryObject>();
+	
+	// Populate cache by creating objects with binary builder.
+	var binary = cache.Ignite.GetBinary();
+
+	cache[1] = binary.GetBuilder("Person")
+		.SetField("Name", "James Wilson").SetField("Age", 23).Build();
+
+	cache[2] = binary.GetBuilder("Person")
+		.SetField("Name", "Daniel Adams").SetField("Age", 56).Build();
+
+	cache[3] = binary.GetBuilder("Person")
+		.SetField("Name", "Cristian Moss").SetField("Age", 40).Build();
+
+	cache[4] = binary.GetBuilder("Person")
+		.SetField("Name", "Allison Mathis").SetField("Age", 32).Build();
+		
+	// Read a cache entry field in binary mode.
+	var person = cache[1];
+	
+	var name = person.GetField<string>("Name");	
+	name.Dump("Name of the person with id 1:");
+	
+	// Modify an entry.
+	cache[1] = person.ToBuilder().SetField("Name", name + " Jr.").Build();
+	cache[1].ToString().Dump("Modified person with id 1:");
+	
+	// Run SQL query.
+	cache.Query(new SqlQuery("Person", "age < 40"))
+		.Select(x => x.Value.ToString())
+		.Dump("Persons with age less than 40:");
+		
+	// Run SQL fields query.
+	cache.QueryFields(new SqlFieldsQuery("select name from Person order by name"))
+		.Dump("All person names:");
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Properties/AssemblyInfo.cs
index 9fcbeb0..9b5eea4 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Properties/AssemblyInfo.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 using System;
 using System.Reflection;
@@ -25,7 +25,7 @@
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2016")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
@@ -45,4 +45,4 @@
 [assembly: InternalsVisibleTo("Apache.Ignite.Benchmarks, PublicKey=0024000004800000940000000602000000240000525341310004000001000100a3e0c1df4cbedbd4ed0e88808401c69b69ec12575ed1c056ac9f448e018fb29af19d236b7b03563aad66c48ab2045e72971ed098d4f65d4cdd38d65abcb39b4f84c626b22ccab2754375f0e8c97dc304fa146f0eddad5cc40a71803a8f15b0b0bb0bff0d4bf0ff6a64bb1044e0d71e6e2405b83fd4c1f7b3e2cfc2e9d50823d4")]
 [assembly: InternalsVisibleTo("Apache.Ignite.AspNet.Tests, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c9380ce05eb74bd7c531f72e9ea615c59d7eceb09bd9795cb3dff1fcf638fd799c2a58a9be42fff156efe1c8cdebb751e27763f6c9a7c80cdc1dc1bbf44283608ef18ccd5017fd57b2b026503637c89c2537f361807f3bdd49265f4d444716159d989342561d324b1a0961640338bb32eaf67f4ae0c95f1b210f65404b0909c6")]
 
-#endif
+#endif
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Apache.Ignite.EntityFramework.Tests.csproj b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Apache.Ignite.EntityFramework.Tests.csproj
new file mode 100644
index 0000000..9711087
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Apache.Ignite.EntityFramework.Tests.csproj
@@ -0,0 +1,96 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+    <ProjectGuid>{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}</ProjectGuid>
+    <OutputType>Library</OutputType>
+    <AppDesignerFolder>Properties</AppDesignerFolder>
+    <RootNamespace>Apache.Ignite.EntityFramework.Tests</RootNamespace>
+    <AssemblyName>Apache.Ignite.EntityFramework.Tests</AssemblyName>
+    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
+    <FileAlignment>512</FileAlignment>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+    <DebugSymbols>true</DebugSymbols>
+    <DebugType>full</DebugType>
+    <Optimize>false</Optimize>
+    <OutputPath>bin\Debug\</OutputPath>
+    <DefineConstants>DEBUG;TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+    <DebugType>pdbonly</DebugType>
+    <Optimize>true</Optimize>
+    <OutputPath>bin\Release\</OutputPath>
+    <DefineConstants>TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SignAssembly>true</SignAssembly>
+  </PropertyGroup>
+  <PropertyGroup>
+    <AssemblyOriginatorKeyFile>Apache.Ignite.EntityFramework.Tests.snk</AssemblyOriginatorKeyFile>
+  </PropertyGroup>
+  <ItemGroup>
+    <Reference Include="EntityFramework">
+      <HintPath>..\packages\EntityFramework.6.1.3\lib\net40\EntityFramework.dll</HintPath>
+    </Reference>
+    <Reference Include="EntityFramework.SqlServerCompact">
+      <HintPath>..\packages\EntityFramework.SqlServerCompact.6.1.3\lib\net40\EntityFramework.SqlServerCompact.dll</HintPath>
+    </Reference>
+    <Reference Include="nunit.framework">
+      <HintPath>..\packages\NUnit.Runners.2.6.3\tools\nunit.framework.dll</HintPath>
+    </Reference>
+    <Reference Include="System" />
+    <Reference Include="System.Core" />
+    <Reference Include="System.Data.SqlServerCe, Version=4.0.0.0, Culture=neutral, PublicKeyToken=89845dcd8080cc91, processorArchitecture=MSIL">
+      <SpecificVersion>False</SpecificVersion>
+      <HintPath>..\packages\Microsoft.SqlServer.Compact.4.0.8876.1\lib\net40\System.Data.SqlServerCe.dll</HintPath>
+    </Reference>
+    <Reference Include="System.Transactions" />
+    <Reference Include="System.Data.DataSetExtensions" />
+    <Reference Include="System.Data" />
+    <Reference Include="System.Xml" />
+  </ItemGroup>
+  <ItemGroup>
+    <Compile Include="ArrayDbDataReaderTests.cs" />
+    <Compile Include="DbCachingPolicyTest.cs" />
+    <Compile Include="EntityFrameworkCacheInitializationTest.cs" />
+    <Compile Include="EntityFrameworkCacheTest.cs" />
+    <Compile Include="Properties\AssemblyInfo.cs" />
+  </ItemGroup>
+  <ItemGroup>
+    <ProjectReference Include="..\Apache.Ignite.Core.Tests\Apache.Ignite.Core.Tests.csproj">
+      <Project>{6a62f66c-da5b-4fbb-8ce7-a95f740fdc7a}</Project>
+      <Name>Apache.Ignite.Core.Tests</Name>
+    </ProjectReference>
+    <ProjectReference Include="..\Apache.Ignite.Core\Apache.Ignite.Core.csproj">
+      <Project>{4cd2f726-7e2b-46c4-a5ba-057bb82eecb6}</Project>
+      <Name>Apache.Ignite.Core</Name>
+    </ProjectReference>
+    <ProjectReference Include="..\Apache.Ignite.EntityFramework\Apache.Ignite.EntityFramework.csproj">
+      <Project>{c558518a-c1a0-4224-aaa9-a8688474b4dc}</Project>
+      <Name>Apache.Ignite.EntityFramework</Name>
+    </ProjectReference>
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="Apache.Ignite.EntityFramework.Tests.snk" />
+    <None Include="App.config" />
+    <None Include="packages.config">
+      <SubType>Designer</SubType>
+    </None>
+  </ItemGroup>
+  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+  <PropertyGroup>
+    <PostBuildEvent>
+		if not exist "$(TargetDir)x86" md "$(TargetDir)x86"
+		xcopy /s /y "$(SolutionDir)packages\Microsoft.SqlServer.Compact.4.0.8876.1\NativeBinaries\x86\*.*" "$(TargetDir)x86"
+		if not exist "$(TargetDir)amd64" md "$(TargetDir)amd64"
+		xcopy /s /y "$(SolutionDir)packages\Microsoft.SqlServer.Compact.4.0.8876.1\NativeBinaries\amd64\*.*" "$(TargetDir)amd64"
+	</PostBuildEvent>
+  </PropertyGroup>
+</Project>
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Apache.Ignite.EntityFramework.Tests.snk b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Apache.Ignite.EntityFramework.Tests.snk
new file mode 100644
index 0000000..5ef85a6
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Apache.Ignite.EntityFramework.Tests.snk
Binary files differ
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/App.config b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/App.config
new file mode 100644
index 0000000..3527920
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/App.config
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <configSections>
+        <section name="igniteConfiguration" type="Apache.Ignite.Core.IgniteConfigurationSection, Apache.Ignite.Core" />
+        <section name="igniteConfiguration2" type="Apache.Ignite.Core.IgniteConfigurationSection, Apache.Ignite.Core" />
+        <section name="entityFramework" type="System.Data.Entity.Internal.ConfigFile.EntityFrameworkSection, EntityFramework, Version=6.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" requirePermission="false" />
+    </configSections>
+    <runtime>
+        <gcServer enabled="true" />
+    </runtime>
+
+    <igniteConfiguration xmlns="http://ignite.apache.org/schema/dotnet/IgniteConfigurationSection" gridName="myGrid1">
+        <discoverySpi type="TcpDiscoverySpi">
+            <ipFinder type="TcpDiscoveryStaticIpFinder">
+                <endpoints>
+                    <string>127.0.0.1:47500</string>
+                </endpoints>
+            </ipFinder>
+        </discoverySpi>
+        <cacheConfiguration>
+            <cacheConfiguration name="cacheName" />
+        </cacheConfiguration>
+    </igniteConfiguration>
+
+    <igniteConfiguration2 gridName="myGrid2" localhost="127.0.0.1">
+        <discoverySpi type="TcpDiscoverySpi">
+            <ipFinder type="TcpDiscoveryStaticIpFinder">
+                <endpoints>
+                    <string>127.0.0.1:47500</string>
+                </endpoints>
+            </ipFinder>
+        </discoverySpi>
+        <cacheConfiguration>
+            <cacheConfiguration name="cacheName2" atomicityMode="Transactional" />
+        </cacheConfiguration>
+    </igniteConfiguration2>
+
+    <entityFramework>
+        <defaultConnectionFactory type="System.Data.Entity.Infrastructure.SqlCeConnectionFactory, EntityFramework">
+            <parameters>
+                <parameter value="System.Data.SqlServerCe.4.0" />
+            </parameters>
+        </defaultConnectionFactory>
+        <providers>
+            <provider invariantName="System.Data.SqlServerCe.4.0" type="System.Data.Entity.SqlServerCompact.SqlCeProviderServices, EntityFramework.SqlServerCompact" />
+        </providers>
+    </entityFramework>
+
+    <system.data>
+        <DbProviderFactories>
+            <remove invariant="System.Data.SqlServerCe.4.0" />
+            <add name="Microsoft SQL Server Compact Data Provider 4.0" invariant="System.Data.SqlServerCe.4.0" description=".NET Framework Data Provider for Microsoft SQL Server Compact" type="System.Data.SqlServerCe.SqlCeProviderFactory, System.Data.SqlServerCe, Version=4.0.0.0, Culture=neutral, PublicKeyToken=89845dcd8080cc91" />
+        </DbProviderFactories>
+    </system.data>
+</configuration>
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/ArrayDbDataReaderTests.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/ArrayDbDataReaderTests.cs
new file mode 100644
index 0000000..f67fed4
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/ArrayDbDataReaderTests.cs
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Tests
+{
+    using System;
+    using System.Linq;
+    using Apache.Ignite.EntityFramework.Impl;
+    using NUnit.Framework;
+
+    /// <summary>
+    /// Tests for <see cref="ArrayDbDataReader"/>.
+    /// </summary>
+    public class ArrayDbDataReaderTests
+    {
+        /// <summary>
+        /// Tests the reader.
+        /// </summary>
+        [Test]
+        public void TestReader()
+        {
+            var dateTime = DateTime.Now;
+            var guid = Guid.NewGuid();
+
+            var data = new[]
+            {
+                new object[]
+                {
+                    (byte) 1, (short) 2, 3, (long) 4, (float) 5, (double) 6, (decimal) 7, "8", '9', dateTime,
+                    guid, false, new byte[] {1,2}, new[] {'a','b'}
+                }
+            };
+
+            var schema = new []
+            {
+                new DataReaderField("fbyte", typeof(byte), "by"),
+                new DataReaderField("fshort", typeof(short), "sh"),
+                new DataReaderField("fint", typeof(int), "in"),
+                new DataReaderField("flong", typeof(long), "lo"),
+                new DataReaderField("ffloat", typeof(float), "fl"),
+                new DataReaderField("fdouble", typeof(double), "do"),
+                new DataReaderField("fdecimal", typeof(decimal), "de"),
+                new DataReaderField("fstring", typeof(string), "st"),
+                new DataReaderField("fchar", typeof(char), "ch"),
+                new DataReaderField("fDateTime", typeof(DateTime), "Da"),
+                new DataReaderField("fGuid", typeof(Guid), "Gu"),
+                new DataReaderField("fbool", typeof(bool), "bo"),
+                new DataReaderField("fbytes", typeof(byte[]), "bb"),
+                new DataReaderField("fchars", typeof(char[]), "cc"),
+            };
+
+            // Create reader,
+            var reader = new ArrayDbDataReader(data, schema);
+
+            // Check basic props.
+            Assert.IsTrue(reader.Read());
+            Assert.AreEqual(0, reader.Depth);
+            Assert.AreEqual(-1, reader.RecordsAffected);
+            Assert.AreEqual(14, reader.FieldCount);
+            Assert.AreEqual(14, reader.VisibleFieldCount);
+            Assert.IsFalse(reader.IsClosed);
+            Assert.IsTrue(reader.HasRows);
+
+            // Check reading.
+            var data2 = new object[14];
+            Assert.AreEqual(14, reader.GetValues(data2));
+            Assert.AreEqual(data[0], data2);
+
+            Assert.AreEqual(1, reader.GetByte(reader.GetOrdinal("fbyte")));
+            Assert.AreEqual("by", reader.GetDataTypeName(0));
+            Assert.AreEqual(typeof(byte), reader.GetFieldType(0));
+            Assert.AreEqual("fbyte", reader.GetName(0));
+            Assert.AreEqual(1, reader["fbyte"]);
+            Assert.AreEqual(1, reader[0]);
+
+            Assert.AreEqual(2, reader.GetInt16(reader.GetOrdinal("fshort")));
+            Assert.AreEqual("sh", reader.GetDataTypeName(1));
+            Assert.AreEqual(typeof(short), reader.GetFieldType(1));
+            Assert.AreEqual("fshort", reader.GetName(1));
+            Assert.AreEqual(2, reader["fshort"]);
+            Assert.AreEqual(2, reader[1]);
+
+            Assert.AreEqual(3, reader.GetInt32(reader.GetOrdinal("fint")));
+            Assert.AreEqual("in", reader.GetDataTypeName(2));
+            Assert.AreEqual(typeof(int), reader.GetFieldType(2));
+            Assert.AreEqual("fint", reader.GetName(2));
+            Assert.AreEqual(3, reader["fint"]);
+            Assert.AreEqual(3, reader[2]);
+
+            Assert.AreEqual(4, reader.GetInt64(reader.GetOrdinal("flong")));
+            Assert.AreEqual("lo", reader.GetDataTypeName(3));
+            Assert.AreEqual(typeof(long), reader.GetFieldType(3));
+            Assert.AreEqual("flong", reader.GetName(3));
+            Assert.AreEqual(4, reader["flong"]);
+            Assert.AreEqual(4, reader[3]);
+
+            Assert.AreEqual(5, reader.GetFloat(reader.GetOrdinal("ffloat")));
+            Assert.AreEqual("fl", reader.GetDataTypeName(4));
+            Assert.AreEqual(typeof(float), reader.GetFieldType(4));
+            Assert.AreEqual("ffloat", reader.GetName(4));
+            Assert.AreEqual(5, reader["ffloat"]);
+            Assert.AreEqual(5, reader[4]);
+
+            Assert.AreEqual(6, reader.GetDouble(reader.GetOrdinal("fdouble")));
+            Assert.AreEqual("do", reader.GetDataTypeName(5));
+            Assert.AreEqual(typeof(double), reader.GetFieldType(5));
+            Assert.AreEqual("fdouble", reader.GetName(5));
+            Assert.AreEqual(6, reader["fdouble"]);
+            Assert.AreEqual(6, reader[5]);
+
+            Assert.AreEqual(7, reader.GetDecimal(reader.GetOrdinal("fdecimal")));
+            Assert.AreEqual("de", reader.GetDataTypeName(6));
+            Assert.AreEqual(typeof(decimal), reader.GetFieldType(6));
+            Assert.AreEqual("fdecimal", reader.GetName(6));
+            Assert.AreEqual(7, reader["fdecimal"]);
+            Assert.AreEqual(7, reader[6]);
+
+            Assert.AreEqual("8", reader.GetString(reader.GetOrdinal("fstring")));
+            Assert.AreEqual("st", reader.GetDataTypeName(7));
+            Assert.AreEqual(typeof(string), reader.GetFieldType(7));
+            Assert.AreEqual("fstring", reader.GetName(7));
+            Assert.AreEqual("8", reader["fstring"]);
+            Assert.AreEqual("8", reader[7]);
+
+            Assert.AreEqual('9', reader.GetChar(reader.GetOrdinal("fchar")));
+            Assert.AreEqual("ch", reader.GetDataTypeName(8));
+            Assert.AreEqual(typeof(char), reader.GetFieldType(8));
+            Assert.AreEqual("fchar", reader.GetName(8));
+            Assert.AreEqual('9', reader["fchar"]);
+            Assert.AreEqual('9', reader[8]);
+
+            Assert.AreEqual(dateTime, reader.GetDateTime(reader.GetOrdinal("fDateTime")));
+            Assert.AreEqual("Da", reader.GetDataTypeName(9));
+            Assert.AreEqual(typeof(DateTime), reader.GetFieldType(9));
+            Assert.AreEqual("fDateTime", reader.GetName(9));
+            Assert.AreEqual(dateTime, reader["fDateTime"]);
+            Assert.AreEqual(dateTime, reader[9]);
+
+            Assert.AreEqual(guid, reader.GetGuid(reader.GetOrdinal("fGuid")));
+            Assert.AreEqual("Gu", reader.GetDataTypeName(10));
+            Assert.AreEqual(typeof(Guid), reader.GetFieldType(10));
+            Assert.AreEqual("fGuid", reader.GetName(10));
+            Assert.AreEqual(guid, reader["fGuid"]);
+            Assert.AreEqual(guid, reader[10]);
+
+            Assert.AreEqual(false, reader.GetBoolean(reader.GetOrdinal("fbool")));
+            Assert.AreEqual("bo", reader.GetDataTypeName(11));
+            Assert.AreEqual(typeof(bool), reader.GetFieldType(11));
+            Assert.AreEqual("fbool", reader.GetName(11));
+            Assert.AreEqual(false, reader["fbool"]);
+            Assert.AreEqual(false, reader[11]);
+
+            var bytes = new byte[2];
+            Assert.AreEqual(2, reader.GetBytes(reader.GetOrdinal("fbytes"),0, bytes, 0, 2));
+            Assert.AreEqual(data[0][12], bytes);
+            Assert.AreEqual("bb", reader.GetDataTypeName(12));
+            Assert.AreEqual(typeof(byte[]), reader.GetFieldType(12));
+            Assert.AreEqual("fbytes", reader.GetName(12));
+            Assert.AreEqual(data[0][12], reader["fbytes"]);
+            Assert.AreEqual(data[0][12], reader[12]);
+
+            var chars = new char[2];
+            Assert.AreEqual(2, reader.GetChars(reader.GetOrdinal("fchars"),0, chars, 0, 2));
+            Assert.AreEqual(data[0][13], chars);
+            Assert.AreEqual("cc", reader.GetDataTypeName(13));
+            Assert.AreEqual(typeof(char[]), reader.GetFieldType(13));
+            Assert.AreEqual("fchars", reader.GetName(13));
+            Assert.AreEqual(data[0][13], reader["fchars"]);
+            Assert.AreEqual(data[0][13], reader[13]);
+
+            Assert.IsFalse(Enumerable.Range(0, 14).Any(x => reader.IsDBNull(x)));
+
+            // Close.
+            reader.Close();
+            Assert.IsTrue(reader.IsClosed);
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/DbCachingPolicyTest.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/DbCachingPolicyTest.cs
new file mode 100644
index 0000000..c9456b6
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/DbCachingPolicyTest.cs
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Tests
+{
+    using System;
+    using Apache.Ignite.EntityFramework;
+    using NUnit.Framework;
+
+    /// <summary>
+    /// Tests for <see cref="DbCachingPolicy"/>.
+    /// </summary>
+    public class DbCachingPolicyTest
+    {
+        /// <summary>
+        /// Tests the default implementation.
+        /// </summary>
+        [Test]
+        public void TestDefaultImpl()
+        {
+            var plc = new DbCachingPolicy();
+
+            Assert.IsTrue(plc.CanBeCached(null));
+            Assert.IsTrue(plc.CanBeCached(null, 0));
+            Assert.AreEqual(TimeSpan.MaxValue, plc.GetExpirationTimeout(null));
+            Assert.AreEqual(DbCachingMode.ReadWrite, plc.GetCachingMode(null));
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/EntityFrameworkCacheInitializationTest.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/EntityFrameworkCacheInitializationTest.cs
new file mode 100644
index 0000000..36b1c2b
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/EntityFrameworkCacheInitializationTest.cs
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Tests
+{
+    using System;
+    using Apache.Ignite.Core;
+    using Apache.Ignite.Core.Cache.Configuration;
+    using Apache.Ignite.Core.Common;
+    using Apache.Ignite.Core.Tests;
+    using Apache.Ignite.EntityFramework;
+    using NUnit.Framework;
+
+    /// <summary>
+    /// Tests the EF cache provider.
+    /// </summary>
+    public class EntityFrameworkCacheInitializationTest
+    {
+        /// <summary>
+        /// Fixture tear down.
+        /// </summary>
+        [TestFixtureTearDown]
+        public void TestFixtureTearDown()
+        {
+            Ignition.StopAll(true);
+        }
+
+        /// <summary>
+        /// Tests the IgniteDbConfiguration.
+        /// </summary>
+        [Test]
+        public void TestConfigurationAndStartup()
+        {
+            Environment.SetEnvironmentVariable("IGNITE_NATIVE_TEST_CLASSPATH", "true");
+
+            Assert.IsNull(Ignition.TryGetIgnite());
+
+            // Test default config (picks up app.config section).
+            CheckCacheAndStop("myGrid1", IgniteDbConfiguration.DefaultCacheNamePrefix, new IgniteDbConfiguration());
+
+            // Specific config section.
+            CheckCacheAndStop("myGrid2", "cacheName2",
+                new IgniteDbConfiguration("igniteConfiguration2", "cacheName2", null));
+
+            // Specific config section, nonexistent cache.
+            CheckCacheAndStop("myGrid2", "newCache",
+                new IgniteDbConfiguration("igniteConfiguration2", "newCache", null));
+
+            // In-code configuration.
+            CheckCacheAndStop("myGrid3", "myCache",
+                new IgniteDbConfiguration(new IgniteConfiguration
+                    {
+                        GridName = "myGrid3",
+                    }, new CacheConfiguration("myCache_metadata")
+                    {
+                        CacheMode = CacheMode.Replicated,
+                        AtomicityMode = CacheAtomicityMode.Transactional
+                    },
+                    new CacheConfiguration("myCache_data") {CacheMode = CacheMode.Replicated}, null),
+                CacheMode.Replicated);
+
+            // Existing instance.
+            var ignite = Ignition.Start(TestUtils.GetTestConfiguration());
+            CheckCacheAndStop(null, "123", new IgniteDbConfiguration(ignite,
+                new CacheConfiguration("123_metadata")
+                {
+                    Backups = 1,
+                    AtomicityMode = CacheAtomicityMode.Transactional
+                },
+                new CacheConfiguration("123_data"), null));
+
+            // Non-tx meta cache.
+            var ex = Assert.Throws<IgniteException>(() => CheckCacheAndStop(null, "123",
+                new IgniteDbConfiguration(TestUtils.GetTestConfiguration(), 
+                    new CacheConfiguration("123_metadata"),
+                    new CacheConfiguration("123_data"), null)));
+
+            Assert.AreEqual("EntityFramework meta cache should be Transactional.", ex.Message);
+
+            // Same cache names.
+            var ex2 = Assert.Throws<ArgumentException>(() => CheckCacheAndStop(null, "abc",
+                new IgniteDbConfiguration(TestUtils.GetTestConfiguration(),
+                    new CacheConfiguration("abc"),
+                    new CacheConfiguration("abc"), null)));
+
+            Assert.IsTrue(ex2.Message.Contains("Meta and Data cache can't have the same name."));
+        }
+
+        /// <summary>
+        /// Checks that specified cache exists and stops all Ignite instances.
+        /// </summary>
+        // ReSharper disable once UnusedParameter.Local
+        private static void CheckCacheAndStop(string gridName, string cacheName, IgniteDbConfiguration cfg,
+            CacheMode cacheMode = CacheMode.Partitioned)
+        {
+            try
+            {
+                Assert.IsNotNull(cfg);
+
+                var ignite = Ignition.TryGetIgnite(gridName);
+                Assert.IsNotNull(ignite);
+
+                var metaCache = ignite.GetCache<object, object>(cacheName + "_metadata");
+                Assert.IsNotNull(metaCache);
+                Assert.AreEqual(cacheMode, metaCache.GetConfiguration().CacheMode);
+
+                if (cacheMode == CacheMode.Partitioned)
+                    Assert.AreEqual(1, metaCache.GetConfiguration().Backups);
+
+                var dataCache = ignite.GetCache<object, object>(cacheName + "_data");
+                Assert.IsNotNull(dataCache);
+                Assert.AreEqual(cacheMode, dataCache.GetConfiguration().CacheMode);
+
+                if (cacheMode == CacheMode.Partitioned)
+                    Assert.AreEqual(0, dataCache.GetConfiguration().Backups);
+            }
+            finally
+            {
+                Ignition.StopAll(true);
+            }
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/EntityFrameworkCacheTest.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/EntityFrameworkCacheTest.cs
new file mode 100644
index 0000000..5fbd8fe
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/EntityFrameworkCacheTest.cs
@@ -0,0 +1,946 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// ReSharper disable UnusedMember.Local
+// ReSharper disable UnusedAutoPropertyAccessor.Local
+// ReSharper disable ClassWithVirtualMembersNeverInherited.Local
+// ReSharper disable UnusedAutoPropertyAccessor.Global
+// ReSharper disable VirtualMemberNeverOverridden.Global
+
+namespace Apache.Ignite.EntityFramework.Tests
+{
+    using System;
+    using System.Collections.Generic;
+    using System.Data;
+    using System.Data.Entity;
+    using System.Data.Entity.Core.EntityClient;
+    using System.Data.Entity.Infrastructure;
+    using System.IO;
+    using System.Linq;
+    using System.Threading;
+    using System.Transactions;
+    using Apache.Ignite.Core;
+    using Apache.Ignite.Core.Cache;
+    using Apache.Ignite.Core.Tests;
+    using Apache.Ignite.EntityFramework;
+    using Apache.Ignite.EntityFramework.Impl;
+    using NUnit.Framework;
+
+    /// <summary>
+    /// Integration test with temporary SQL CE database.
+    /// </summary>
+    public class EntityFrameworkCacheTest
+    {
+        /** */
+        private static readonly string TempFile = Path.GetTempFileName();
+
+        /** */
+        private static readonly string ConnectionString = "Datasource = " + TempFile;
+
+        /** */
+        private static readonly DelegateCachingPolicy Policy = new DelegateCachingPolicy();
+
+        /** */
+        private ICache<object, object> _cache;
+
+        /** */
+        private ICache<object, object> _metaCache;
+
+        /// <summary>
+        /// Fixture set up.
+        /// </summary>
+        [TestFixtureSetUp]
+        public void FixtureSetUp()
+        {
+            // Start 2 nodes.
+            var cfg = TestUtils.GetTestConfiguration();
+            var ignite = Ignition.Start(cfg);
+
+            Ignition.Start(new IgniteConfiguration(cfg) {GridName = "grid2"});
+
+            // Create SQL CE database in a temp file.
+            using (var ctx = GetDbContext())
+            {
+                File.Delete(TempFile);
+                ctx.Database.Create();
+            }
+
+            // Get the caches.
+            _cache = ignite.GetCache<object, object>("entityFrameworkQueryCache_data")
+                .WithKeepBinary<object, object>();
+
+            _metaCache = ignite.GetCache<object, object>("entityFrameworkQueryCache_metadata")
+                .WithKeepBinary<object, object>();
+        }
+
+        /// <summary>
+        /// Fixture tear down.
+        /// </summary>
+        [TestFixtureTearDown]
+        public void FixtureTearDown()
+        {
+            using (var ctx = GetDbContext())
+            {
+                ctx.Database.Delete();
+            }
+
+            Ignition.StopAll(true);
+            File.Delete(TempFile);
+        }
+
+        /// <summary>
+        /// Sets up the test.
+        /// </summary>
+        [SetUp]
+        public void TestSetUp()
+        {
+            // Reset the policy.
+            Policy.CanBeCachedFunc = null;
+            Policy.CanBeCachedRowsFunc = null;
+            Policy.GetExpirationTimeoutFunc = null;
+            Policy.GetCachingStrategyFunc = null;
+
+            // Clean up the db.
+            using (var ctx = GetDbContext())
+            {
+                ctx.Blogs.RemoveRange(ctx.Blogs);
+                ctx.Posts.RemoveRange(ctx.Posts);
+                ctx.Tests.RemoveRange(ctx.Tests);
+
+                ctx.SaveChanges();
+            }
+
+            using (var ctx = GetDbContext())
+            {
+                Assert.IsEmpty(ctx.Blogs);
+                Assert.IsEmpty(ctx.Posts);
+            }
+
+            // Clear the caches.
+            _cache.Clear();
+            _metaCache.Clear();
+        }
+
+        /// <summary>
+        /// Tests that caching actually happens.
+        /// </summary>
+        [Test]
+        public void TestResultFromCache()
+        {
+            using (var ctx = GetDbContext())
+            {
+                // Add data.
+                ctx.Posts.Add(new Post {Title = "Foo", Blog = new Blog(), PostId = 1});
+                ctx.Posts.Add(new Post {Title = "Bar", Blog = new Blog(), PostId = 2});
+                ctx.SaveChanges();
+
+                Assert.AreEqual(new[] {"Foo"}, ctx.Posts.Where(x => x.Title == "Foo").Select(x => x.Title).ToArray());
+                Assert.AreEqual(new[] {"Bar"}, ctx.Posts.Where(x => x.Title == "Bar").Select(x => x.Title).ToArray());
+
+                // Alter cached data: swap cached values.
+                
+                var cachedData = _cache.ToArray();
+
+                Assert.AreEqual(2, cachedData.Length);
+
+                _cache[cachedData[0].Key] = cachedData[1].Value;
+                _cache[cachedData[1].Key] = cachedData[0].Value;
+
+                // Verify.
+                Assert.AreEqual(new[] {"Bar"}, ctx.Posts.Where(x => x.Title == "Foo").Select(x => x.Title).ToArray());
+                Assert.AreEqual(new[] {"Foo"}, ctx.Posts.Where(x => x.Title == "Bar").Select(x => x.Title).ToArray());
+            }
+        }
+
+        /// <summary>
+        /// Tests the read-write strategy (default).
+        /// </summary>
+        [Test]
+        public void TestReadWriteStrategy()
+        {
+            using (var ctx = GetDbContext())
+            {
+                var blog = new Blog
+                {
+                    Name = "Foo",
+                    Posts = new List<Post>
+                    {
+                        new Post {Title = "My First Post", Content = "Hello World!"}
+                    }
+                };
+                ctx.Blogs.Add(blog);
+
+                Assert.AreEqual(2, ctx.SaveChanges());
+
+                // Check that query works.
+                Assert.AreEqual(1, ctx.Posts.Where(x => x.Title.StartsWith("My")).ToArray().Length);
+
+                // Add new post to check invalidation.
+                ctx.Posts.Add(new Post {BlogId = blog.BlogId, Title = "My Second Post", Content = "Foo bar."});
+                Assert.AreEqual(1, ctx.SaveChanges());
+
+                Assert.AreEqual(0, _cache.GetSize()); // No cached entries.
+
+                Assert.AreEqual(2, ctx.Posts.Where(x => x.Title.StartsWith("My")).ToArray().Length);
+
+                Assert.AreEqual(1, _cache.GetSize()); // Cached query added.
+
+                // Delete post.
+                ctx.Posts.Remove(ctx.Posts.First());
+                Assert.AreEqual(1, ctx.SaveChanges());
+
+                Assert.AreEqual(0, _cache.GetSize()); // No cached entries.
+                Assert.AreEqual(1, ctx.Posts.Where(x => x.Title.StartsWith("My")).ToArray().Length);
+
+                Assert.AreEqual(1, _cache.GetSize()); // Cached query added.
+
+                // Modify post.
+                Assert.AreEqual(0, ctx.Posts.Count(x => x.Title.EndsWith("updated")));
+
+                ctx.Posts.Single().Title += " - updated";
+                Assert.AreEqual(1, ctx.SaveChanges());
+
+                Assert.AreEqual(0, _cache.GetSize()); // No cached entries.
+                Assert.AreEqual(1, ctx.Posts.Count(x => x.Title.EndsWith("updated")));
+
+                Assert.AreEqual(1, _cache.GetSize()); // Cached query added.
+            }
+        }
+
+        /// <summary>
+        /// Tests the read only strategy.
+        /// </summary>
+        [Test]
+        public void TestReadOnlyStrategy()
+        {
+            // Set up a policy to cache Blogs as read-only and Posts as read-write.
+            Policy.GetCachingStrategyFunc = q =>
+                q.AffectedEntitySets.Count == 1 && q.AffectedEntitySets.Single().Name == "Blog"
+                    ? DbCachingMode.ReadOnly
+                    : DbCachingMode.ReadWrite;
+
+            using (var ctx = GetDbContext())
+            {
+                ctx.Blogs.Add(new Blog
+                {
+                    Name = "Foo",
+                    Posts = new List<Post>
+                    {
+                        new Post {Title = "Post"}
+                    }
+                });
+
+                ctx.SaveChanges();
+
+                // Update entities.
+                Assert.AreEqual("Foo", ctx.Blogs.Single().Name);
+                Assert.AreEqual("Post", ctx.Posts.Single().Title);
+
+                ctx.Blogs.Single().Name += " - updated";
+                ctx.Posts.Single().Title += " - updated";
+
+                ctx.SaveChanges();
+            }
+
+            // Verify that cached result is not changed for blogs, but changed for posts.
+            using (var ctx = GetDbContext())
+            {
+                // Raw SQL queries do not hit cache - verify that actual data is updated.
+                Assert.AreEqual("Foo - updated", ctx.Database.SqlQuery<string>("select name from blogs").Single());
+                Assert.AreEqual("Post - updated", ctx.Database.SqlQuery<string>("select title from posts").Single());
+
+                // Check EF queries that hit cache.
+                Assert.AreEqual("Foo", ctx.Blogs.Single().Name);
+                Assert.AreEqual("Post - updated", ctx.Posts.Single().Title);
+
+            }
+
+            // Clear the cache and verify that actual value in DB is changed.
+            _cache.Clear();
+
+            using (var ctx = GetDbContext())
+            {
+                Assert.AreEqual("Foo - updated", ctx.Blogs.Single().Name);
+                Assert.AreEqual("Post - updated", ctx.Posts.Single().Title);
+            }
+        }
+
+        /// <summary>
+        /// Tests the scalar queries.
+        /// </summary>
+        [Test]
+        public void TestScalars()
+        {
+            using (var ctx = GetDbContext())
+            {
+                var blog = new Blog
+                {
+                    Name = "Foo",
+                    Posts = new List<Post>
+                    {
+                        new Post {Title = "1"},
+                        new Post {Title = "2"},
+                        new Post {Title = "3"},
+                        new Post {Title = "4"}
+                    }
+                };
+                ctx.Blogs.Add(blog);
+
+                Assert.AreEqual(5, ctx.SaveChanges());
+
+                // Test sum and count.
+                const string esql = "SELECT COUNT(1) FROM [BloggingContext].Posts";
+
+                Assert.AreEqual(4, ctx.Posts.Count());
+                Assert.AreEqual(4, ctx.Posts.Count(x => x.Content == null));
+                Assert.AreEqual(4, GetEntityCommand(ctx, esql).ExecuteScalar());
+                Assert.AreEqual(blog.BlogId*4, ctx.Posts.Sum(x => x.BlogId));
+
+                ctx.Posts.Remove(ctx.Posts.First());
+                ctx.SaveChanges();
+
+                Assert.AreEqual(3, ctx.Posts.Count());
+                Assert.AreEqual(3, ctx.Posts.Count(x => x.Content == null));
+                Assert.AreEqual(3, GetEntityCommand(ctx, esql).ExecuteScalar());
+                Assert.AreEqual(blog.BlogId*3, ctx.Posts.Sum(x => x.BlogId));
+            }
+        }
+
+        /// <summary>
+        /// Tests transactions created with BeginTransaction.
+        /// </summary>
+        [Test]
+        public void TestTx()
+        {
+            // Check TX without commit.
+            using (var ctx = GetDbContext())
+            {
+                using (ctx.Database.BeginTransaction())
+                {
+                    ctx.Posts.Add(new Post {Title = "Foo", Blog = new Blog()});
+                    ctx.SaveChanges();
+
+                    Assert.AreEqual(1, ctx.Posts.ToArray().Length);
+                }
+            }
+
+            using (var ctx = GetDbContext())
+            {
+                Assert.AreEqual(0, ctx.Posts.ToArray().Length);
+            }
+
+            // Check TX with commit.
+            using (var ctx = GetDbContext())
+            {
+                using (var tx = ctx.Database.BeginTransaction())
+                {
+                    ctx.Posts.Add(new Post {Title = "Foo", Blog = new Blog()});
+                    ctx.SaveChanges();
+
+                    Assert.AreEqual(1, ctx.Posts.ToArray().Length);
+
+                    tx.Commit();
+
+                    Assert.AreEqual(1, ctx.Posts.ToArray().Length);
+                }
+            }
+
+            using (var ctx = GetDbContext())
+            {
+                Assert.AreEqual(1, ctx.Posts.ToArray().Length);
+            }
+        }
+
+        /// <summary>
+        /// Tests transactions created with TransactionScope.
+        /// </summary>
+        [Test]
+        public void TestTxScope()
+        {
+            // Check TX without commit.
+            using (new TransactionScope())
+            {
+                using (var ctx = GetDbContext())
+                {
+                    ctx.Posts.Add(new Post {Title = "Foo", Blog = new Blog()});
+                    ctx.SaveChanges();
+                }
+            }
+
+            using (var ctx = GetDbContext())
+            {
+                Assert.AreEqual(0, ctx.Posts.ToArray().Length);
+            }
+
+            // Check TX with commit.
+            using (var tx = new TransactionScope())
+            {
+                using (var ctx = GetDbContext())
+                {
+                    ctx.Posts.Add(new Post {Title = "Foo", Blog = new Blog()});
+                    ctx.SaveChanges();
+                }
+
+                tx.Complete();
+            }
+
+            using (var ctx = GetDbContext())
+            {
+                Assert.AreEqual(1, ctx.Posts.ToArray().Length);
+            }
+        }
+
+        /// <summary>
+        /// Tests the expiration.
+        /// </summary>
+        [Test]
+        public void TestExpiration()
+        {
+            Policy.GetExpirationTimeoutFunc = qry => TimeSpan.FromSeconds(0.3);
+
+            using (var ctx = GetDbContext())
+            {
+                ctx.Posts.Add(new Post {Title = "Foo", Blog = new Blog()});
+                ctx.SaveChanges();
+
+                Assert.AreEqual(1, ctx.Posts.ToArray().Length);
+                Assert.AreEqual(1, _cache.GetSize());
+
+                var key = _cache.Single().Key;
+                Assert.IsTrue(_cache.ContainsKey(key));
+
+                Thread.Sleep(300);
+
+                Assert.IsFalse(_cache.ContainsKey(key));
+                Assert.AreEqual(0, _cache.GetSize());
+                Assert.AreEqual(2, _metaCache.GetSize());
+            }
+        }
+
+        /// <summary>
+        /// Tests the caching policy.
+        /// </summary>
+        [Test]
+        public void TestCachingPolicy()
+        {
+            var funcs = new List<string>();
+
+            var checkQry = (Action<DbQueryInfo>) (qry =>
+                {
+                    var set = qry.AffectedEntitySets.Single();
+
+                    Assert.AreEqual("Post", set.Name);
+
+                    Assert.AreEqual(1, qry.Parameters.Count);
+                    Assert.AreEqual(-5, qry.Parameters[0].Value);
+                    Assert.AreEqual(DbType.Int32, qry.Parameters[0].DbType);
+
+                    Assert.IsTrue(qry.CommandText.EndsWith("WHERE [Extent1].[BlogId] > @p__linq__0"));
+                }
+            );
+
+            Policy.CanBeCachedFunc = qry =>
+            {
+                funcs.Add("CanBeCached");
+                checkQry(qry);
+                return true;
+            };
+
+            Policy.CanBeCachedRowsFunc = (qry, rows) =>
+            {
+                funcs.Add("CanBeCachedRows");
+                Assert.AreEqual(3, rows);
+                checkQry(qry);
+                return true;
+            };
+
+            Policy.GetCachingStrategyFunc = qry =>
+            {
+                funcs.Add("GetCachingStrategy");
+                checkQry(qry);
+                return DbCachingMode.ReadWrite;
+            };
+
+            Policy.GetExpirationTimeoutFunc = qry =>
+            {
+                funcs.Add("GetExpirationTimeout");
+                checkQry(qry);
+                return TimeSpan.MaxValue;
+            };
+
+            using (var ctx = GetDbContext())
+            {
+                var blog = new Blog();
+
+                ctx.Posts.Add(new Post {Title = "Foo", Blog = blog});
+                ctx.Posts.Add(new Post {Title = "Bar", Blog = blog});
+                ctx.Posts.Add(new Post {Title = "Baz", Blog = blog});
+
+                ctx.SaveChanges();
+
+                int minId = -5;
+                Assert.AreEqual(3, ctx.Posts.Where(x => x.BlogId > minId).ToArray().Length);
+
+                // Check that policy methods are called in correct order with correct params.
+                Assert.AreEqual(
+                    new[] {"GetCachingStrategy", "CanBeCached", "CanBeCachedRows", "GetExpirationTimeout"},
+                    funcs.ToArray());
+            }
+        }
+
+        /// <summary>
+        /// Tests the cache reader indirectly with an entity that has various field types.
+        /// </summary>
+        [Test]
+        public void TestCacheReader()
+        {
+            // Tests all kinds of entity field types to cover ArrayDbDataReader.
+            var test = GetTestEntity();
+
+            using (var ctx = new BloggingContext(ConnectionString))
+            {
+                ctx.Tests.Add(test);
+                ctx.SaveChanges();
+            }
+
+            // Use new context to ensure no first-level caching.
+            using (var ctx = new BloggingContext(ConnectionString))
+            {
+                // Check default deserialization.
+                var test0 = ctx.Tests.Single(x => x.Bool);
+                Assert.AreEqual(test, test0);
+            }
+        }
+
+        /// <summary>
+        /// Tests the cache reader by calling it directly.
+        /// These calls are (partly) delegated by EF to the <see cref="ArrayDbDataReader"/>.
+        /// </summary>
+        [Test]
+        public void TestCacheReaderRaw()
+        {
+            var test = GetTestEntity();
+
+            using (var ctx = new BloggingContext(ConnectionString))
+            {
+                ctx.Tests.Add(test);
+                ctx.SaveChanges();
+
+                test = ctx.Tests.Single();
+            }
+
+            using (var ctx = new BloggingContext(ConnectionString))
+            {
+                var cmd = GetEntityCommand(ctx, "SELECT VALUE Test FROM BloggingContext.Tests AS Test");
+
+                using (var reader = cmd.ExecuteReader(CommandBehavior.SequentialAccess))
+                {
+                    // Check schema.
+                    Assert.Throws<NotSupportedException>(() => reader.GetSchemaTable());
+                    Assert.AreEqual(0, reader.Depth);
+                    Assert.AreEqual(-1, reader.RecordsAffected);
+                    Assert.IsTrue(reader.HasRows);
+                    Assert.IsFalse(reader.IsClosed);
+                    Assert.AreEqual(11, reader.FieldCount);
+                    Assert.AreEqual(11, reader.VisibleFieldCount);
+
+                    // Check field data type names.
+                    Assert.AreEqual("Edm.Int32", reader.GetDataTypeName(0));
+                    Assert.AreEqual("Edm.Byte", reader.GetDataTypeName(1));
+                    Assert.AreEqual("Edm.Int16", reader.GetDataTypeName(2));
+                    Assert.AreEqual("Edm.Int64", reader.GetDataTypeName(3));
+                    Assert.AreEqual("Edm.Single", reader.GetDataTypeName(4));
+                    Assert.AreEqual("Edm.Double", reader.GetDataTypeName(5));
+                    Assert.AreEqual("Edm.Decimal", reader.GetDataTypeName(6));
+                    Assert.AreEqual("Edm.Boolean", reader.GetDataTypeName(7));
+                    Assert.AreEqual("Edm.String", reader.GetDataTypeName(8));
+                    Assert.AreEqual("Edm.Guid", reader.GetDataTypeName(9));
+                    Assert.AreEqual("Edm.DateTime", reader.GetDataTypeName(10));
+
+                    // Check field types.
+                    Assert.AreEqual(typeof(int), reader.GetFieldType(0));
+                    Assert.AreEqual(typeof(byte), reader.GetFieldType(1));
+                    Assert.AreEqual(typeof(short), reader.GetFieldType(2));
+                    Assert.AreEqual(typeof(long), reader.GetFieldType(3));
+                    Assert.AreEqual(typeof(float), reader.GetFieldType(4));
+                    Assert.AreEqual(typeof(double), reader.GetFieldType(5));
+                    Assert.AreEqual(typeof(decimal), reader.GetFieldType(6));
+                    Assert.AreEqual(typeof(bool), reader.GetFieldType(7));
+                    Assert.AreEqual(typeof(string), reader.GetFieldType(8));
+                    Assert.AreEqual(typeof(Guid), reader.GetFieldType(9));
+                    Assert.AreEqual(typeof(DateTime), reader.GetFieldType(10));
+
+                    // Read.
+                    Assert.IsTrue(reader.Read());
+
+                    // Test values array.
+                    var vals = new object[reader.FieldCount];
+                    reader.GetValues(vals);
+
+                    Assert.AreEqual(test.Byte, vals[reader.GetOrdinal("Byte")]);
+                    Assert.AreEqual(test.Short, vals[reader.GetOrdinal("Short")]);
+                    Assert.AreEqual(test.ArrayReaderTestId, vals[reader.GetOrdinal("ArrayReaderTestId")]);
+                    Assert.AreEqual(test.Long, vals[reader.GetOrdinal("Long")]);
+                    Assert.AreEqual(test.Float, vals[reader.GetOrdinal("Float")]);
+                    Assert.AreEqual(test.Double, vals[reader.GetOrdinal("Double")]);
+                    Assert.AreEqual(test.Decimal, vals[reader.GetOrdinal("Decimal")]);
+                    Assert.AreEqual(test.Bool, vals[reader.GetOrdinal("Bool")]);
+                    Assert.AreEqual(test.String, vals[reader.GetOrdinal("String")]);
+                    Assert.AreEqual(test.Guid, vals[reader.GetOrdinal("Guid")]);
+                    Assert.AreEqual(test.DateTime, vals[reader.GetOrdinal("DateTime")]);
+                }
+
+                using (var reader = cmd.ExecuteReader(CommandBehavior.SequentialAccess))
+                {
+                    // Read.
+                    Assert.IsTrue(reader.Read());
+
+                    // Test separate values.
+                    Assert.AreEqual(test.ArrayReaderTestId, reader.GetInt32(0));
+                    Assert.AreEqual(test.Byte, reader.GetByte(1));
+                    Assert.AreEqual(test.Short, reader.GetInt16(2));
+                    Assert.AreEqual(test.Long, reader.GetInt64(3));
+                    Assert.AreEqual(test.Float, reader.GetFloat(4));
+                    Assert.AreEqual(test.Double, reader.GetDouble(5));
+                    Assert.AreEqual(test.Decimal, reader.GetDecimal(6));
+                    Assert.AreEqual(test.Bool, reader.GetBoolean(7));
+                    Assert.AreEqual(test.String, reader.GetString(8));
+                    Assert.AreEqual(test.Guid, reader.GetGuid(9));
+                    Assert.AreEqual(test.DateTime, reader.GetDateTime(10));
+                }
+            }
+        }
+
+        /// <summary>
+        /// Tests the database context.
+        /// </summary>
+        [Test]
+        public void TestDbContext()
+        {
+            using (var ctx = GetDbContext())
+            {
+                var objCtx = ((IObjectContextAdapter) ctx).ObjectContext;
+
+                var script = objCtx.CreateDatabaseScript();
+                Assert.IsTrue(script.StartsWith("CREATE TABLE \"Blogs\""));
+            }
+        }
+
+        /// <summary>
+        /// Tests that old versions of caches entries are cleaned up.
+        /// </summary>
+        [Test]
+        public void TestOldEntriesCleanup()
+        {
+            // Run in a loop to generate a bunch of outdated cache entries.
+            for (var i = 0; i < 100; i++)
+                CreateRemoveBlog();
+
+            // Only one version of data is in the cache.
+            Assert.AreEqual(1, _cache.GetSize());
+            Assert.AreEqual(1, _metaCache.GetSize());
+        }
+
+        /// <summary>
+        /// Tests the old entries cleanup in multi threaded scenario.
+        /// </summary>
+        [Test]
+        [Category(TestUtils.CategoryIntensive)]
+        public void TestOldEntriesCleanupMultithreaded()
+        {
+            TestUtils.RunMultiThreaded(CreateRemoveBlog, 4, 20);
+
+            // Wait for the cleanup to complete.
+            Thread.Sleep(500);
+
+            // Only one version of data is in the cache.
+            Assert.AreEqual(1, _cache.GetSize());
+            Assert.AreEqual(1, _metaCache.GetSize());
+        }
+
+        /// <summary>
+        /// Tests the entity set version increment in multi-threaded scenario.
+        /// </summary>
+        [Test]
+        [Category(TestUtils.CategoryIntensive)]
+        public void TestIncrementMultithreaded()
+        {
+            var opCnt = 0;
+
+            TestUtils.RunMultiThreaded(() =>
+            {
+                var blog = new Blog {Name = "my blog"};
+                using (var ctx = GetDbContext())
+                {
+                    ctx.Blogs.Add(blog);
+                    ctx.SaveChanges();
+                }
+
+                Interlocked.Increment(ref opCnt);
+
+                using (var ctx = GetDbContext())
+                {
+                    ctx.Blogs.Attach(blog);
+                    ctx.Blogs.Remove(blog);
+                    ctx.SaveChanges();
+                }
+
+                Interlocked.Increment(ref opCnt);
+            }, 4, 10);
+
+            var setVersion = _metaCache["Blog"];
+
+            Assert.AreEqual(opCnt, setVersion);
+        }
+
+        /// <summary>
+        /// Creates and removes a blog.
+        /// </summary>
+        private void CreateRemoveBlog()
+        {
+            try
+            {
+                CreateRemoveBlog0();
+            }
+            catch (Exception ex)
+            {
+                // Ignore SQL CE glitch.
+                if (!ex.ToString().Contains("The current row was deleted."))
+                    throw;
+            }
+        }
+
+        /// <summary>
+        /// Creates and removes a blog.
+        /// </summary>
+        private void CreateRemoveBlog0()
+        {
+            var blog = new Blog {Name = "my blog"};
+            var threadId = Thread.CurrentThread.ManagedThreadId;
+
+            Func<object> getMeta = () => _metaCache.Where(x => x.Key.Equals("Blog"))
+                .Select(x => x.Value).SingleOrDefault() ?? "null";
+
+            var meta1 = getMeta();
+
+            using (var ctx = GetDbContext())
+            {
+                ctx.Blogs.Add(blog);
+                ctx.SaveChanges();
+            }
+
+            var meta2 = getMeta();
+
+            using (var ctx = GetDbContext())
+            {
+                // Use ToArray so that there is always the same DB query.
+                Assert.AreEqual(1, ctx.Blogs.ToArray().Count(x => x.BlogId == blog.BlogId),
+                    string.Format("Existing blog not found: {0} = {1}, {2} | {3}", blog.BlogId, meta1, meta2, 
+                    threadId));
+            }
+
+            var meta3 = getMeta();
+
+            using (var ctx = GetDbContext())
+            {
+                ctx.Blogs.Attach(blog);
+                ctx.Blogs.Remove(blog);
+                ctx.SaveChanges();
+            }
+
+            var meta4 = getMeta();
+
+            using (var ctx = GetDbContext())
+            {
+                // Use ToArray so that there is always the same DB query.
+                Assert.AreEqual(0, ctx.Blogs.ToArray().Count(x => x.BlogId == blog.BlogId),
+                    string.Format("Found removed blog: {0} = {1}, {2}, {3}, {4} | {5}", blog.BlogId, meta1, 
+                    meta2, meta3, meta4, threadId));
+            }
+        }
+
+        /// <summary>
+        /// Creates an EntityCommand for the specified Entity SQL string (does not execute it).
+        /// </summary>
+        private static EntityCommand GetEntityCommand(IObjectContextAdapter ctx, string esql)
+        {
+            var objCtx = ctx.ObjectContext;
+
+            var conn = objCtx.Connection;
+            conn.Open();
+
+            var cmd = (EntityCommand) conn.CreateCommand();
+            cmd.CommandText = esql;
+
+            return cmd;
+        }
+
+        /// <summary>
+        /// Gets the test entity.
+        /// </summary>
+        private static ArrayReaderTest GetTestEntity()
+        {
+            return new ArrayReaderTest
+            {
+                DateTime = DateTime.Today,
+                Bool = true,
+                Byte = 56,
+                String = "z",
+                Decimal = (decimal)5.6,
+                Double = 7.8d,
+                Float = -4.5f,
+                Guid = Guid.NewGuid(),
+                ArrayReaderTestId = -8,
+                Long = 3,
+                Short = 5
+            };
+        }
+
+        /// <summary>
+        /// Gets the database context.
+        /// </summary>
+        private static BloggingContext GetDbContext()
+        {
+            return new BloggingContext(ConnectionString);
+        }
+
+        private class MyDbConfiguration : IgniteDbConfiguration
+        {
+            public MyDbConfiguration() : base(Ignition.GetIgnite(), null, null, Policy)
+            {
+                // No-op.
+            }
+        }
+
+        [DbConfigurationType(typeof(MyDbConfiguration))]
+        private class BloggingContext : DbContext
+        {
+            public BloggingContext(string nameOrConnectionString) : base(nameOrConnectionString)
+            {
+                // No-op.
+            }
+
+            public virtual DbSet<Blog> Blogs { get; set; }
+            public virtual DbSet<Post> Posts { get; set; }
+            public virtual DbSet<ArrayReaderTest> Tests { get; set; }
+        }
+
+        private class Blog
+        {
+            public int BlogId { get; set; }
+            public string Name { get; set; }
+
+            public virtual List<Post> Posts { get; set; }
+        }
+
+        private class Post
+        {
+            public int PostId { get; set; }
+            public string Title { get; set; }
+            public string Content { get; set; }
+
+            public int BlogId { get; set; }
+            public virtual Blog Blog { get; set; }
+        }
+
+        private class ArrayReaderTest
+        {
+            public byte Byte { get; set; }
+            public short Short { get; set; }
+            public int ArrayReaderTestId { get; set; }
+            public long Long { get; set; }
+            public float Float { get; set; }
+            public double Double { get; set; }
+            public decimal Decimal { get; set; }
+            public bool Bool { get; set; }
+            public string String { get; set; }
+            public Guid Guid { get; set; }
+            public DateTime DateTime { get; set; }
+
+            private bool Equals(ArrayReaderTest other)
+            {
+                return Byte == other.Byte && Short == other.Short &&
+                       ArrayReaderTestId == other.ArrayReaderTestId && Long == other.Long && 
+                       Float.Equals(other.Float) && Double.Equals(other.Double) && 
+                       Decimal == other.Decimal && Bool == other.Bool && String == other.String && 
+                       Guid.Equals(other.Guid) && DateTime.Equals(other.DateTime);
+            }
+
+            public override bool Equals(object obj)
+            {
+                if (ReferenceEquals(null, obj)) return false;
+                if (ReferenceEquals(this, obj)) return true;
+                if (obj.GetType() != GetType()) return false;
+                return Equals((ArrayReaderTest) obj);
+            }
+
+            public override int GetHashCode()
+            {
+                unchecked
+                {
+                    var hashCode = Byte.GetHashCode();
+                    hashCode = (hashCode*397) ^ Short.GetHashCode();
+                    hashCode = (hashCode*397) ^ ArrayReaderTestId;
+                    hashCode = (hashCode*397) ^ Long.GetHashCode();
+                    hashCode = (hashCode*397) ^ Float.GetHashCode();
+                    hashCode = (hashCode*397) ^ Double.GetHashCode();
+                    hashCode = (hashCode*397) ^ Decimal.GetHashCode();
+                    hashCode = (hashCode*397) ^ Bool.GetHashCode();
+                    hashCode = (hashCode*397) ^ String.GetHashCode();
+                    hashCode = (hashCode*397) ^ Guid.GetHashCode();
+                    hashCode = (hashCode*397) ^ DateTime.GetHashCode();
+                    return hashCode;
+                }
+            }
+        }
+
+        private class DelegateCachingPolicy : DbCachingPolicy
+        {
+            public Func<DbQueryInfo, bool> CanBeCachedFunc { get; set; }
+
+            public Func<DbQueryInfo, int, bool> CanBeCachedRowsFunc { get; set; }
+
+            public Func<DbQueryInfo, TimeSpan> GetExpirationTimeoutFunc { get; set; }
+
+            public Func<DbQueryInfo, DbCachingMode> GetCachingStrategyFunc { get; set; }
+
+            public override bool CanBeCached(DbQueryInfo queryInfo)
+            {
+                return CanBeCachedFunc == null || CanBeCachedFunc(queryInfo);
+            }
+
+            public override bool CanBeCached(DbQueryInfo queryInfo, int rowCount)
+            {
+                return CanBeCachedRowsFunc == null || CanBeCachedRowsFunc(queryInfo, rowCount);
+            }
+
+            public override TimeSpan GetExpirationTimeout(DbQueryInfo queryInfo)
+            {
+                return GetExpirationTimeoutFunc == null 
+                    ? base.GetExpirationTimeout(queryInfo) 
+                    : GetExpirationTimeoutFunc(queryInfo);
+            }
+
+            public override DbCachingMode GetCachingMode(DbQueryInfo queryInfo)
+            {
+                return GetCachingStrategyFunc == null 
+                    ? base.GetCachingMode(queryInfo)
+                    : GetCachingStrategyFunc(queryInfo);
+            }
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..fe5e7ce
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/Properties/AssemblyInfo.cs
@@ -0,0 +1,39 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+using System;
+using System.Reflection;
+using System.Runtime.InteropServices;
+
+[assembly: AssemblyTitle("Apache.Ignite.EntityFramework.Tests")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("Apache Software Foundation")]
+[assembly: AssemblyProduct("Apache Ignite.NET")]
+[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+[assembly: ComVisible(false)]
+
+[assembly: Guid("cda5700e-78f3-4a9e-a9b0-704cbe94651c")]
+
+[assembly: AssemblyVersion("1.8.0.14218")]
+[assembly: AssemblyFileVersion("1.8.0.14218")]
+[assembly: AssemblyInformationalVersion("1.8.0")]
+
+[assembly: CLSCompliant(true)]
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/packages.config b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/packages.config
new file mode 100644
index 0000000..42a3b73
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework.Tests/packages.config
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<packages>
+  <package id="NUnit.Runners" version="2.6.3" targetFramework="net40" />
+  <package id="EntityFramework" version="6.1.3" targetFramework="net40" />
+  <package id="EntityFramework.SqlServerCompact" version="6.1.3" targetFramework="net40" />
+  <package id="Microsoft.SqlServer.Compact" version="4.0.8876.1" targetFramework="net40" />
+</packages>
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.csproj b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.csproj
new file mode 100644
index 0000000..8b3c651
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.csproj
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+    <ProjectGuid>{C558518A-C1A0-4224-AAA9-A8688474B4DC}</ProjectGuid>
+    <OutputType>Library</OutputType>
+    <AppDesignerFolder>Properties</AppDesignerFolder>
+    <RootNamespace>Apache.Ignite.EntityFramework</RootNamespace>
+    <AssemblyName>Apache.Ignite.EntityFramework</AssemblyName>
+    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
+    <FileAlignment>512</FileAlignment>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+    <DebugSymbols>true</DebugSymbols>
+    <DebugType>full</DebugType>
+    <Optimize>false</Optimize>
+    <OutputPath>bin\Debug\</OutputPath>
+    <DefineConstants>DEBUG;TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+    <RunCodeAnalysis>true</RunCodeAnalysis>
+    <CodeAnalysisRuleSet>AllRules.ruleset</CodeAnalysisRuleSet>
+    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+    <DebugType>pdbonly</DebugType>
+    <Optimize>true</Optimize>
+    <OutputPath>bin\Release\</OutputPath>
+    <DefineConstants>TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SignAssembly>true</SignAssembly>
+  </PropertyGroup>
+  <PropertyGroup>
+    <AssemblyOriginatorKeyFile>Apache.Ignite.EntityFramework.snk</AssemblyOriginatorKeyFile>
+  </PropertyGroup>
+  <ItemGroup>
+    <Reference Include="EntityFramework, Version=6.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089, processorArchitecture=MSIL">
+      <HintPath>..\packages\EntityFramework.6.1.3\lib\net40\EntityFramework.dll</HintPath>
+      <Private>True</Private>
+    </Reference>
+    <Reference Include="System" />
+    <Reference Include="System.Configuration" />
+    <Reference Include="System.Core" />
+    <Reference Include="System.Data" />
+    <Reference Include="System.Xml" />
+  </ItemGroup>
+  <ItemGroup>
+    <Compile Include="DbCachingMode.cs" />
+    <Compile Include="DbQueryInfo.cs" />
+    <Compile Include="IDbCachingPolicy.cs" />
+    <Compile Include="Impl\ArrayDbDataReader.cs" />
+    <Compile Include="Impl\DataReaderResult.cs" />
+    <Compile Include="Impl\DbCacheKey.cs" />
+    <Compile Include="Impl\DbCommandDefinitionProxy.cs" />
+    <Compile Include="Impl\DbCommandInfo.cs" />
+    <Compile Include="Impl\DbCommandProxy.cs">
+      <SubType>Component</SubType>
+    </Compile>
+    <Compile Include="Impl\DbProviderServicesProxy.cs" />
+    <Compile Include="Impl\DataReaderField.cs" />
+    <Compile Include="DbCachingPolicy.cs" />
+    <Compile Include="IgniteDbConfiguration.cs" />
+    <Compile Include="Impl\DbCache.cs" />
+    <Compile Include="Impl\DbTransactionInterceptor.cs" />
+    <Compile Include="Properties\AssemblyInfo.cs" />
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="Apache.Ignite.EntityFramework.snk" />
+    <None Include="packages.config" />
+  </ItemGroup>
+  <ItemGroup>
+    <ProjectReference Include="..\Apache.Ignite.Core\Apache.Ignite.Core.csproj">
+      <Project>{4CD2F726-7E2B-46C4-A5BA-057BB82EECB6}</Project>
+      <Name>Apache.Ignite.Core</Name>
+    </ProjectReference>
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="Apache.Ignite.EntityFramework.nuspec" />
+  </ItemGroup>
+  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
+       Other similar extension points exist, see Microsoft.Common.targets.
+  <Target Name="BeforeBuild">
+  </Target>
+  <Target Name="AfterBuild">
+  </Target>
+  -->
+</Project>
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.nuspec b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.nuspec
new file mode 100644
index 0000000..b8bcd46
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.nuspec
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!-- 
+
+Creating NuGet package:
+1) Build Apache.Ignite.sln (AnyCPU configuration)
+2) Create package (use csproj instead of nuspec so that template substitution works): 
+   nuget pack Apache.Ignite.EntityFramework.csproj -Prop Configuration=Release -Prop Platform=AnyCPU
+
+-->
+
+<package >
+    <metadata>
+        <id>Apache.Ignite.EntityFramework</id>
+        <title>Apache Ignite Entity Framework Integration</title>
+        <!-- -->
+        <version>$version$</version>
+        <authors>Apache Ignite</authors>
+        <owners>Apache Software Foundation</owners>
+        <licenseUrl>http://www.apache.org/licenses/LICENSE-2.0</licenseUrl>
+        <projectUrl>https://ignite.apache.org/</projectUrl>
+        <iconUrl>https://ignite.apache.org/images/logo_ignite_32_32.png</iconUrl>
+        <requireLicenseAcceptance>false</requireLicenseAcceptance>
+        <description>
+Apache Ignite EntityFramework Second Level Cache: caches EF query results in a distributed in-memory cache.
+            
+More info: https://apacheignite-net.readme.io/
+        </description>
+        <summary>
+            Apache Ignite EntityFramework Integration
+        </summary>
+        <releaseNotes></releaseNotes>
+        <copyright>Copyright 2016</copyright>
+        <tags>EntityFramework Second-Level Apache Ignite In-Memory Distributed Computing SQL NoSQL Grid Map Reduce Cache</tags>
+        <dependencies>
+            <dependency id="Apache.Ignite" version="[$version$]" />
+            <dependency id="EntityFramework" version="[6.1.0,7.0.0)" />
+        </dependencies>    
+    </metadata>
+</package>
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.snk b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.snk
new file mode 100644
index 0000000..799e742
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Apache.Ignite.EntityFramework.snk
Binary files differ
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbCachingMode.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbCachingMode.cs
new file mode 100644
index 0000000..b38400c
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbCachingMode.cs
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework
+{
+    using System.Data.Entity;
+
+    /// <summary>
+    /// Represents a second-level caching strategy.
+    /// </summary>
+    public enum DbCachingMode
+    {
+        /// <summary>
+        /// Read-only mode, never invalidates.
+        /// <para />
+        /// Database updates are ignored in this mode. Once query results have been cached, they are kept in cache 
+        /// until expired (forever when no expiration is specified).
+        /// <para />
+        /// This mode is suitable for data that is not expected to change 
+        /// (like a list of countries and other dictionary data).
+        /// </summary>
+        ReadOnly,
+
+        /// <summary>
+        /// Read-write mode. Cached data is invalidated when underlying entity set changes.
+        /// <para />
+        /// This is "normal" cache mode which always provides correct query results.
+        /// <para />
+        /// Keep in mind that this mode works correctly only when all database changes are performed 
+        /// via <see cref="DbContext"/> with Ignite caching configured. Other database updates are not tracked.
+        /// </summary>
+        ReadWrite
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbCachingPolicy.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbCachingPolicy.cs
new file mode 100644
index 0000000..17aa68a
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbCachingPolicy.cs
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework
+{
+    using System;
+
+    /// <summary>
+    /// Default caching policy implementation: everything is cached with <see cref="DbCachingMode.ReadWrite"/>, 
+    /// no expiration.
+    /// </summary>
+    // ReSharper disable once ClassWithVirtualMembersNeverInherited.Global
+    public class DbCachingPolicy : IDbCachingPolicy
+    {
+        /// <summary>
+        /// Determines whether the specified query can be cached.
+        /// </summary>
+        /// <param name="queryInfo">The query information.</param>
+        /// <returns>
+        ///   <c>true</c> if the specified query can be cached; otherwise, <c>false</c>.
+        /// </returns>
+        public virtual bool CanBeCached(DbQueryInfo queryInfo)
+        {
+            return true;
+        }
+
+        /// <summary>
+        /// Determines whether the specified number of rows should be cached.
+        /// </summary>
+        /// <param name="queryInfo">The query information.</param>
+        /// <param name="rowCount">The count of fetched rows.</param>
+        /// <returns></returns>
+        public virtual bool CanBeCached(DbQueryInfo queryInfo, int rowCount)
+        {
+            return true;
+        }
+
+        /// <summary>
+        /// Gets the absolute expiration timeout for a given query.
+        /// </summary>
+        /// <param name="queryInfo">The query information.</param>
+        /// <returns>Expiration timeout. <see cref="TimeSpan.MaxValue"/> for no expiration.</returns>
+        public virtual TimeSpan GetExpirationTimeout(DbQueryInfo queryInfo)
+        {
+            return TimeSpan.MaxValue;
+        }
+
+        /// <summary>
+        /// Gets the caching strategy for a given query.
+        /// </summary>
+        /// <param name="queryInfo">The query information.</param>
+        /// <returns>Caching strategy for the query.</returns>
+        public virtual DbCachingMode GetCachingMode(DbQueryInfo queryInfo)
+        {
+            return DbCachingMode.ReadWrite;
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbQueryInfo.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbQueryInfo.cs
new file mode 100644
index 0000000..5ec5446
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/DbQueryInfo.cs
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework
+{
+    using System.Collections.Generic;
+    using System.Data.Common;
+    using System.Data.Entity.Core.Metadata.Edm;
+    using System.Diagnostics;
+
+    /// <summary>
+    /// Query info.
+    /// </summary>
+    public class DbQueryInfo
+    {
+        /** */
+        private readonly ICollection<EntitySetBase> _affectedEntitySets;
+
+        /** */
+        private readonly string _commandText;
+        
+        /** */
+        private readonly DbParameterCollection _parameters;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DbQueryInfo"/> class.
+        /// </summary>
+        internal DbQueryInfo(ICollection<EntitySetBase> affectedEntitySets, string commandText, 
+            DbParameterCollection parameters)
+        {
+            Debug.Assert(affectedEntitySets != null);
+            Debug.Assert(commandText != null);
+            Debug.Assert(parameters != null);
+
+            _affectedEntitySets = affectedEntitySets;
+            _commandText = commandText;
+            _parameters = parameters;
+        }
+
+        /// <summary>
+        /// Gets the affected entity sets.
+        /// </summary>
+        public ICollection<EntitySetBase> AffectedEntitySets
+        {
+            get { return _affectedEntitySets; }
+        }
+
+        /// <summary>
+        /// Gets the command text.
+        /// </summary>
+        public string CommandText
+        {
+            get { return _commandText; }
+        }
+
+        /// <summary>
+        /// Gets the parameters.
+        /// </summary>
+        public DbParameterCollection Parameters
+        {
+            get { return _parameters; }
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/IDbCachingPolicy.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/IDbCachingPolicy.cs
new file mode 100644
index 0000000..504ab5e
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/IDbCachingPolicy.cs
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework
+{
+    using System;
+
+    /// <summary>
+    /// Caching policy: defines which queries should be cached.
+    /// </summary>
+    public interface IDbCachingPolicy
+    {
+        /// <summary>
+        /// Determines whether the specified query can be cached.
+        /// </summary>
+        /// <param name="queryInfo">The query information.</param>
+        /// <returns>
+        ///   <c>true</c> if the specified query can be cached; otherwise, <c>false</c>.
+        /// </returns>
+        bool CanBeCached(DbQueryInfo queryInfo);
+
+        /// <summary>
+        /// Determines whether the specified number of rows should be cached.
+        /// </summary>
+        /// <param name="queryInfo">The query information.</param>
+        /// <param name="rowCount">The count of fetched rows.</param>
+        /// <returns></returns>
+        bool CanBeCached(DbQueryInfo queryInfo, int rowCount);
+
+        /// <summary>
+        /// Gets the absolute expiration timeout for a given query.
+        /// </summary>
+        /// <param name="queryInfo">The query information.</param>
+        /// <returns>Expiration timeout. <see cref="TimeSpan.MaxValue"/> for no expiration.</returns>
+        TimeSpan GetExpirationTimeout(DbQueryInfo queryInfo);
+
+        /// <summary>
+        /// Gets the caching strategy for a given query.
+        /// </summary>
+        /// <param name="queryInfo">The query information.</param>
+        /// <returns>Caching strategy for the query.</returns>
+        DbCachingMode GetCachingMode(DbQueryInfo queryInfo);
+    }
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/IgniteDbConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/IgniteDbConfiguration.cs
new file mode 100644
index 0000000..c467f94
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/IgniteDbConfiguration.cs
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework
+{
+    using System.Configuration;
+    using System.Data.Entity;
+    using System.Data.Entity.Core.Common;
+    using System.Diagnostics.CodeAnalysis;
+    using System.Globalization;
+    using Apache.Ignite.Core;
+    using Apache.Ignite.Core.Cache.Configuration;
+    using Apache.Ignite.Core.Common;
+    using Apache.Ignite.Core.Impl.Common;
+    using Apache.Ignite.EntityFramework.Impl;
+
+    /// <summary>
+    /// <see cref="DbConfiguration"/> implementation that uses Ignite as a second-level cache 
+    /// for Entity Framework queries.
+    /// </summary>
+    public class IgniteDbConfiguration : DbConfiguration
+    {
+        /// <summary>
+        /// The configuration section name to be used when starting Ignite.
+        /// </summary>
+        private const string ConfigurationSectionName = "igniteConfiguration";
+
+        /// <summary>
+        /// The default cache name to be used for cached EF data.
+        /// </summary>
+        public const string DefaultCacheNamePrefix = "entityFrameworkQueryCache";
+
+        /// <summary>
+        /// Suffix for the meta cache name.
+        /// </summary>
+        private const string MetaCacheSuffix = "_metadata";
+
+        /// <summary>
+        /// Suffix for the data cache name.
+        /// </summary>
+        private const string DataCacheSuffix = "_data";
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="IgniteDbConfiguration"/> class.
+        /// <para />
+        /// This constructor uses default Ignite instance (with null <see cref="IgniteConfiguration.GridName"/>) 
+        /// and a cache with <see cref="DefaultCacheNamePrefix"/> name.
+        /// <para />
+        /// Ignite instance will be started automatically, if it is not started yet.
+        /// <para /> 
+        /// <see cref="IgniteConfigurationSection"/> with name 
+        /// <see cref="ConfigurationSectionName"/> will be picked up when starting Ignite, if present.
+        /// </summary>
+        public IgniteDbConfiguration() 
+            : this(GetConfiguration(ConfigurationSectionName, false), 
+                  GetDefaultMetaCacheConfiguration(DefaultCacheNamePrefix), 
+                  GetDefaultDataCacheConfiguration(DefaultCacheNamePrefix), null)
+        {
+            // No-op.
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="IgniteDbConfiguration" /> class.
+        /// </summary>
+        /// <param name="configurationSectionName">Name of the configuration section.</param>
+        /// <param name="cacheNamePrefix">The cache name prefix for Data and Metadata caches.</param>
+        /// <param name="policy">The caching policy. Null for default <see cref="DbCachingPolicy" />.</param>
+        public IgniteDbConfiguration(string configurationSectionName, string cacheNamePrefix, IDbCachingPolicy policy)
+            : this(configurationSectionName,
+                GetDefaultMetaCacheConfiguration(cacheNamePrefix),
+                GetDefaultDataCacheConfiguration(cacheNamePrefix), policy)
+
+        {
+            // No-op.
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="IgniteDbConfiguration"/> class.
+        /// </summary>
+        /// <param name="configurationSectionName">Name of the configuration section.</param>
+        /// <param name="metaCacheConfiguration">
+        /// Configuration of the metadata cache which holds entity set information. Null for default configuration.
+        /// <para />
+        /// This cache holds small amount of data, but should not lose entries. At least one backup recommended.
+        /// </param>
+        /// <param name="dataCacheConfiguration">
+        /// Configuration of the data cache which holds query results. Null for default configuration.
+        /// <para />
+        /// This cache tolerates lost data and can have no backups.
+        /// </param>
+        /// <param name="policy">The caching policy. Null for default <see cref="DbCachingPolicy"/>.</param>
+        public IgniteDbConfiguration(string configurationSectionName, CacheConfiguration metaCacheConfiguration,
+            CacheConfiguration dataCacheConfiguration, IDbCachingPolicy policy)
+            : this(GetConfiguration(configurationSectionName, true), 
+                  metaCacheConfiguration, dataCacheConfiguration, policy)
+        {
+            // No-op.
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="IgniteDbConfiguration" /> class.
+        /// </summary>
+        /// <param name="igniteConfiguration">The ignite configuration to use for starting Ignite instance.</param>
+        /// <param name="metaCacheConfiguration">
+        /// Configuration of the metadata cache which holds entity set information. Null for default configuration. 
+        /// <para />
+        /// This cache holds small amount of data, but should not lose entries. At least one backup recommended.
+        /// </param>
+        /// <param name="dataCacheConfiguration">
+        /// Configuration of the data cache which holds query results. Null for default configuration.
+        /// <para />
+        /// This cache tolerates lost data and can have no backups.
+        /// </param>
+        /// <param name="policy">The caching policy. Null for default <see cref="DbCachingPolicy"/>.</param>
+        public IgniteDbConfiguration(IgniteConfiguration igniteConfiguration,
+            CacheConfiguration metaCacheConfiguration, CacheConfiguration dataCacheConfiguration,
+            IDbCachingPolicy policy)
+            : this(GetOrStartIgnite(igniteConfiguration), metaCacheConfiguration, dataCacheConfiguration, policy)
+        {
+            // No-op.
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="IgniteDbConfiguration" /> class.
+        /// </summary>
+        /// <param name="ignite">The ignite instance to use.</param>
+        /// <param name="metaCacheConfiguration">
+        /// Configuration of the metadata cache which holds entity set information. Null for default configuration. 
+        /// <para />
+        /// This cache holds small amount of data, but should not lose entries. At least one backup recommended.
+        /// </param>
+        /// <param name="dataCacheConfiguration">
+        /// Configuration of the data cache which holds query results. Null for default configuration.
+        /// <para />
+        /// This cache tolerates lost data and can have no backups.
+        /// </param>
+        /// <param name="policy">The caching policy. Null for default <see cref="DbCachingPolicy" />.</param>
+        [SuppressMessage("Microsoft.Design", "CA1062:Validate arguments of public methods", 
+            Justification = "Validation is present")]
+        public IgniteDbConfiguration(IIgnite ignite, CacheConfiguration metaCacheConfiguration,
+            CacheConfiguration dataCacheConfiguration, IDbCachingPolicy policy)
+        {
+            IgniteArgumentCheck.NotNull(ignite, "ignite");
+
+            metaCacheConfiguration = metaCacheConfiguration ?? GetDefaultMetaCacheConfiguration();
+            dataCacheConfiguration = dataCacheConfiguration ?? GetDefaultDataCacheConfiguration();
+
+            var efCache = new DbCache(ignite, metaCacheConfiguration, dataCacheConfiguration);
+
+            var txHandler = new DbTransactionInterceptor(efCache);
+
+            AddInterceptor(txHandler);
+
+            // SetProviderServices is not suitable. We should replace whatever provider there is with our proxy.
+            Loaded += (sender, args) => args.ReplaceService<DbProviderServices>(
+                (services, a) => new DbProviderServicesProxy(services, policy, efCache, txHandler));
+        }
+
+        /// <summary>
+        /// Gets the Ignite instance.
+        /// </summary>
+        private static IIgnite GetOrStartIgnite(IgniteConfiguration cfg)
+        {
+            cfg = cfg ?? new IgniteConfiguration();
+
+            return Ignition.TryGetIgnite(cfg.GridName) ?? Ignition.Start(cfg);
+        }
+
+        /// <summary>
+        /// Gets the configuration.
+        /// </summary>
+        private static IgniteConfiguration GetConfiguration(string sectionName, bool throwIfAbsent)
+        {
+            IgniteArgumentCheck.NotNull(sectionName, "sectionName");
+
+            var section = ConfigurationManager.GetSection(sectionName) as IgniteConfigurationSection;
+
+            if (section != null)
+            {
+                if (section.IgniteConfiguration == null)
+                    throw new IgniteException(string.Format(CultureInfo.InvariantCulture,
+                        "Failed to initialize {0}. {1} with name {2} is defined in <configSections>, " +
+                        "but not present in configuration.",
+                        typeof(IgniteDbConfiguration), typeof(IgniteConfigurationSection), sectionName));
+
+
+                return section.IgniteConfiguration;
+            }
+
+            if (!throwIfAbsent)
+                return null;
+
+            throw new IgniteException(string.Format(CultureInfo.InvariantCulture,
+                "Failed to initialize {0}. Could not find {1} with name {2} in application configuration.",
+                typeof (IgniteDbConfiguration), typeof (IgniteConfigurationSection), sectionName));
+        }
+
+        /// <summary>
+        /// Gets the default meta cache configuration.
+        /// </summary>
+        private static CacheConfiguration GetDefaultMetaCacheConfiguration(string namePrefix = null)
+        {
+            return new CacheConfiguration((namePrefix ?? DefaultCacheNamePrefix) + MetaCacheSuffix)
+            {
+                CacheMode = CacheMode.Partitioned,
+                Backups = 1,
+                AtomicityMode = CacheAtomicityMode.Transactional,  // Required due to IGNITE-3955
+                WriteSynchronizationMode = CacheWriteSynchronizationMode.PrimarySync
+            };
+        }
+
+        /// <summary>
+        /// Gets the default data cache configuration.
+        /// </summary>
+        private static CacheConfiguration GetDefaultDataCacheConfiguration(string namePrefix = null)
+        {
+            return new CacheConfiguration((namePrefix ?? DefaultCacheNamePrefix) + DataCacheSuffix)
+            {
+                CacheMode = CacheMode.Partitioned,
+                Backups = 0,
+                AtomicityMode = CacheAtomicityMode.Atomic,
+                WriteSynchronizationMode = CacheWriteSynchronizationMode.PrimarySync
+            };
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/ArrayDbDataReader.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/ArrayDbDataReader.cs
new file mode 100644
index 0000000..89523f4
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/ArrayDbDataReader.cs
@@ -0,0 +1,305 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System;
+    using System.Collections;
+    using System.Data;
+    using System.Data.Common;
+    using System.Diagnostics;
+    using System.Diagnostics.CodeAnalysis;
+
+    /// <summary>
+    /// Reads the data from array. Forward-only reader over a pre-materialized
+    /// (cached) result set; supports a single result set only.
+    /// </summary>
+    internal class ArrayDbDataReader : DbDataReader
+    {
+        /** Row data: one object[] per row, indexed by ordinal. */
+        private readonly object[][] _data;
+
+        /** Column metadata, parallel to the row arrays. */
+        private readonly DataReaderField[] _schema;
+
+        /** Current row index; -1 means reading has not started. */
+        private int _pos = -1;
+
+        /** Closed flag. */
+        private bool _closed;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="ArrayDbDataReader"/> class.
+        /// </summary>
+        /// <param name="data">The data.</param>
+        /// <param name="schema">The schema.</param>
+        public ArrayDbDataReader(object[][] data, DataReaderField[] schema)
+        {
+            Debug.Assert(data != null);
+            Debug.Assert(schema != null);
+
+            _data = data;
+            _schema = schema;
+        }
+
+        /** <inheritDoc /> */
+        public override void Close()
+        {
+            _closed = true;
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override DataTable GetSchemaTable()
+        {
+            throw new NotSupportedException();
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override bool NextResult()
+        {
+            return false;  // multiple result sets are not supported
+        }
+
+        /** <inheritDoc /> */
+        public override bool Read()
+        {
+            if (_pos >= _data.Length - 1)
+                return false;
+
+            _pos++;
+
+            return true;
+        }
+
+        /** <inheritDoc /> */
+        public override int Depth
+        {
+            get { return 0; }
+        }
+
+        /** <inheritDoc /> */
+        public override bool IsClosed
+        {
+            get { return _closed; }
+        }
+
+        /** <inheritDoc /> */
+        public override int RecordsAffected
+        {
+            get { return -1; }  // SELECT statements affect no records
+        }
+
+        /** <inheritDoc /> */
+        public override bool GetBoolean(int ordinal)
+        {
+            return (bool) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override byte GetByte(int ordinal)
+        {
+            return (byte) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override long GetBytes(int ordinal, long dataOffset, byte[] buffer, int bufferOffset, int length)
+        {
+            Debug.Assert(buffer != null);
+
+            var data = (byte[]) GetValue(ordinal);
+
+            // Per DbDataReader contract, copy at most 'length' bytes, further limited
+            // by remaining buffer space and remaining source data.
+            var size = Math.Min(length, (int) Math.Min(buffer.Length - bufferOffset, data.Length - dataOffset));
+
+            Array.Copy(data, dataOffset, buffer, bufferOffset, size);
+
+            return size;
+        }
+
+        /** <inheritDoc /> */
+        public override char GetChar(int ordinal)
+        {
+            return (char) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override long GetChars(int ordinal, long dataOffset, char[] buffer, int bufferOffset, int length)
+        {
+            Debug.Assert(buffer != null);
+
+            var data = (char[]) GetValue(ordinal);
+
+            // Per DbDataReader contract, copy at most 'length' chars, further limited
+            // by remaining buffer space and remaining source data.
+            var size = Math.Min(length, (int) Math.Min(buffer.Length - bufferOffset, data.Length - dataOffset));
+
+            Array.Copy(data, dataOffset, buffer, bufferOffset, size);
+
+            return size;
+        }
+
+        /** <inheritDoc /> */
+        public override Guid GetGuid(int ordinal)
+        {
+            return (Guid) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override short GetInt16(int ordinal)
+        {
+            return (short) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override int GetInt32(int ordinal)
+        {
+            return (int) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override long GetInt64(int ordinal)
+        {
+            return (long) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override DateTime GetDateTime(int ordinal)
+        {
+            return (DateTime) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override string GetString(int ordinal)
+        {
+            return (string) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override object GetValue(int ordinal)
+        {
+            return GetRow()[ordinal];
+        }
+
+        /** <inheritDoc /> */
+        public override int GetValues(object[] values)
+        {
+            var row = GetRow();
+
+            var size = Math.Min(row.Length, values.Length);
+
+            Array.Copy(row, values, size);
+
+            return size;
+        }
+
+        /** <inheritDoc /> */
+        public override bool IsDBNull(int ordinal)
+        {
+            var val = GetValue(ordinal);
+
+            return val == null || val == DBNull.Value;
+        }
+
+        /** <inheritDoc /> */
+        public override int FieldCount
+        {
+            get { return _schema.Length; }
+        }
+
+        /** <inheritDoc /> */
+        public override object this[int ordinal]
+        {
+            get { return GetValue(ordinal); }
+        }
+
+        /** <inheritDoc /> */
+        public override object this[string name]
+        {
+            get { return GetValue(GetOrdinal(name)); }
+        }
+
+        /** <inheritDoc /> */
+        public override bool HasRows
+        {
+            get { return _data.Length > 0; }
+        }
+
+        /** <inheritDoc /> */
+        public override decimal GetDecimal(int ordinal)
+        {
+            return (decimal) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override double GetDouble(int ordinal)
+        {
+            return (double) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override float GetFloat(int ordinal)
+        {
+            return (float) GetValue(ordinal);
+        }
+
+        /** <inheritDoc /> */
+        public override string GetName(int ordinal)
+        {
+            return _schema[ordinal].Name;
+        }
+
+        /** <inheritDoc /> */
+        public override int GetOrdinal(string name)
+        {
+            for (int i = 0; i < _schema.Length; i++)
+            {
+                if (_schema[i].Name == name)
+                    return i;
+            }
+
+            throw new InvalidOperationException("Field not found: " + name);
+        }
+
+        /** <inheritDoc /> */
+        public override string GetDataTypeName(int ordinal)
+        {
+            return _schema[ordinal].DataType;
+        }
+
+        /** <inheritDoc /> */
+        public override Type GetFieldType(int ordinal)
+        {
+            return _schema[ordinal].FieldType;
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override IEnumerator GetEnumerator()
+        {
+            throw new NotSupportedException();
+        }
+
+        /// <summary>
+        /// Gets the current row, or throws if <see cref="Read"/> has not been called yet.
+        /// </summary>
+        private object[] GetRow()
+        {
+            if (_pos < 0)
+                throw new InvalidOperationException("Data reading has not started.");
+
+            return _data[_pos];
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DataReaderField.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DataReaderField.cs
new file mode 100644
index 0000000..0e7baf0
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DataReaderField.cs
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System;
+
+    /// <summary>
+    /// Represents a data reader field (column metadata: name, CLR type, provider type name).
+    /// NOTE: this class is binary-serialized ([Serializable]); field names are part of the
+    /// serialized format — do not rename them.
+    /// </summary>
+    [Serializable]
+    internal class DataReaderField
+    {
+        /** Column name. */
+        private readonly string _name;
+
+        /** CLR type of the column values. */
+        private readonly Type _fieldType;
+
+        /** Provider-specific data type name. */
+        private readonly string _dataType;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DataReaderField"/> class.
+        /// </summary>
+        /// <param name="name">The name.</param>
+        /// <param name="fieldType">The type.</param>
+        /// <param name="dataType">Type of the data.</param>
+        public DataReaderField(string name, Type fieldType, string dataType)
+        {
+            _name = name;
+            _fieldType = fieldType;
+            _dataType = dataType;
+        }
+
+        /// <summary>
+        /// Gets the name.
+        /// </summary>
+        public string Name
+        {
+            get { return _name; }
+        }
+
+        /// <summary>
+        /// Gets the type of the field.
+        /// </summary>
+        public Type FieldType
+        {
+            get { return _fieldType; }
+        }
+
+        /// <summary>
+        /// Gets the type of the data.
+        /// </summary>
+        public string DataType
+        {
+            get { return _dataType; }
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DataReaderResult.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DataReaderResult.cs
new file mode 100644
index 0000000..48f763c
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DataReaderResult.cs
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System;
+    using System.Collections.Generic;
+    using System.Data;
+    using System.Data.Common;
+    using System.Linq;
+
+    /// <summary>
+    /// Cacheable result of a DbDataReader: fully materialized rows plus column schema.
+    /// NOTE: binary-serialized ([Serializable]); field names are part of the format.
+    /// </summary>
+    [Serializable]
+    internal class DataReaderResult
+    {
+        /** Materialized rows. */
+        private readonly object[][] _data;
+
+        /** Column schema. */
+        private readonly DataReaderField[] _schema;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DataReaderResult"/> class.
+        /// Drains and disposes the provided reader.
+        /// </summary>
+        public DataReaderResult(IDataReader reader)
+        {
+            try
+            {
+                // Materialize all rows up front so the reader can be released.
+                var rows = new List<object[]>();
+
+                while (reader.Read())
+                {
+                    var row = new object[reader.FieldCount];
+
+                    reader.GetValues(row);
+
+                    rows.Add(row);
+                }
+
+                _data = rows.ToArray();
+
+                var fieldCount = reader.FieldCount;
+
+                _schema = new DataReaderField[fieldCount];
+
+                for (var i = 0; i < fieldCount; i++)
+                {
+                    _schema[i] = new DataReaderField(reader.GetName(i), reader.GetFieldType(i),
+                        reader.GetDataTypeName(i));
+                }
+            }
+            finally
+            {
+                // Release the underlying reader even if materialization fails.
+                reader.Close();
+                reader.Dispose();
+            }
+        }
+
+        /// <summary>
+        /// Creates the reader over this instance.
+        /// </summary>
+        public DbDataReader CreateReader()
+        {
+            return new ArrayDbDataReader(_data, _schema);
+        }
+
+        /// <summary>
+        /// Gets the row count.
+        /// </summary>
+        public int RowCount
+        {
+            get { return _data.Length; }
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCache.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCache.cs
new file mode 100644
index 0000000..a7ac2c9
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCache.cs
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System;
+    using System.Collections.Generic;
+    using System.Data.Entity.Core.Metadata.Edm;
+    using System.Diagnostics;
+    using System.Diagnostics.CodeAnalysis;
+    using System.IO;
+    using System.Linq;
+    using System.Runtime.Serialization.Formatters.Binary;
+    using Apache.Ignite.Core;
+    using Apache.Ignite.Core.Binary;
+    using Apache.Ignite.Core.Cache;
+    using Apache.Ignite.Core.Cache.Configuration;
+    using Apache.Ignite.Core.Cache.Expiry;
+    using Apache.Ignite.Core.Common;
+    using Apache.Ignite.Core.Impl.Cache;
+    using Apache.Ignite.Core.Impl.Common;
+    using Apache.Ignite.Core.Log;
+
+    /// <summary>
+    /// Database query cache.
+    /// </summary>
+    internal class DbCache
+    {
+        /** Extension id.  */
+        private const int ExtensionId = 1;
+
+        /** Invalidate sets extension operation. */
+        private const int OpInvalidateSets = 1;
+
+        /** Put data extension operation. */
+        private const int OpPutItem = 2;
+
+        /** Get data extension operation. */
+        private const int OpGetItem = 3;
+
+        /** Max number of cached expiry caches. */
+        private const int MaxExpiryCaches = 1000;
+
+        /** Main cache: stores SQL -> QueryResult mappings. */
+        private readonly ICache<string, object> _cache;
+
+        /** Entity set version cache. */
+        private readonly ICache<string, long> _metaCache;
+
+        /** Cached caches per (expiry_seconds * 10). */
+        private volatile Dictionary<long, ICache<string, object>> _expiryCaches =
+            new Dictionary<long, ICache<string, object>>();
+
+        /** Sync object. */
+        private readonly object _syncRoot = new object();
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DbCache" /> class.
+        /// </summary>
+        /// <param name="ignite">The ignite.</param>
+        /// <param name="metaCacheConfiguration">The meta cache configuration.</param>
+        /// <param name="dataCacheConfiguration">The data cache configuration.</param>
+        [SuppressMessage("Microsoft.Design", "CA1062:Validate arguments of public methods",
+            Justification = "Validation is present")]
+        public DbCache(IIgnite ignite, CacheConfiguration metaCacheConfiguration, 
+            CacheConfiguration dataCacheConfiguration)
+        {
+            IgniteArgumentCheck.NotNull(ignite, "ignite");
+            IgniteArgumentCheck.NotNull(metaCacheConfiguration, "metaCacheConfiguration");
+            IgniteArgumentCheck.NotNull(dataCacheConfiguration, "dataCacheConfiguration");
+
+            IgniteArgumentCheck.Ensure(metaCacheConfiguration.Name != dataCacheConfiguration.Name, 
+                "dataCacheConfiguration", "Meta and Data cache can't have the same name.");
+
+            _metaCache = ignite.GetOrCreateCache<string, long>(metaCacheConfiguration);
+            _cache = ignite.GetOrCreateCache<string, object>(dataCacheConfiguration);
+
+            var metaCfg = _metaCache.GetConfiguration();
+
+            if (metaCfg.AtomicityMode != CacheAtomicityMode.Transactional)
+                throw new IgniteException("EntityFramework meta cache should be Transactional.");
+
+            if (metaCfg.CacheMode == CacheMode.Partitioned && metaCfg.Backups < 1)
+                ignite.Logger.Warn("EntityFramework meta cache is partitioned and has no backups. " +
+                                   "This can lead to data loss and incorrect query results.");
+        }
+
+        /// <summary>
+        /// Gets the cache key to be used with GetItem and PutItem.
+        /// </summary>
+        public DbCacheKey GetCacheKey(string key, ICollection<EntitySetBase> dependentEntitySets, DbCachingMode mode)
+        {
+            if (mode == DbCachingMode.ReadWrite)
+            {
+                var versions = GetEntitySetVersions(dependentEntitySets);
+
+                return new DbCacheKey(key, dependentEntitySets, versions);
+            }
+
+            if (mode == DbCachingMode.ReadOnly)
+                return new DbCacheKey(key, null, null);
+
+            throw new ArgumentOutOfRangeException("mode");
+        }
+
+        /// <summary>
+        /// Gets the item from cache.
+        /// </summary>
+        public bool GetItem(DbCacheKey key, out object value)
+        {
+            var valueBytes = ((ICacheInternal) _cache).DoOutInOpExtension(ExtensionId, OpGetItem,
+                w => WriteKey(key, w, false), r => r.ReadObject<byte[]>());
+
+            if (valueBytes == null)
+            {
+                value = null;
+
+                return false;
+            }
+
+            using (var ms = new MemoryStream(valueBytes))
+            {
+                value = new BinaryFormatter().Deserialize(ms);
+            }
+
+            return true;
+        }
+
+        /// <summary>
+        /// Puts the item to cache.
+        /// </summary>
+        public void PutItem(DbCacheKey key, object value, TimeSpan absoluteExpiration)
+        {
+            using (var stream = new MemoryStream())
+            {
+                new BinaryFormatter().Serialize(stream, value);
+
+                var valueBytes = stream.ToArray();
+
+                var cache = GetCacheWithExpiry(absoluteExpiration);
+
+                ((ICacheInternal)cache).DoOutInOpExtension<object>(ExtensionId, OpPutItem, w =>
+                {
+                    WriteKey(key, w, true);
+
+                    w.WriteByteArray(valueBytes);
+                }, null);
+            }
+        }
+
+        /// <summary>
+        /// Invalidates the sets.
+        /// </summary>
+        public void InvalidateSets(ICollection<EntitySetBase> entitySets)
+        {
+            Debug.Assert(entitySets != null && entitySets.Count > 0);
+
+            // Increase version for each dependent entity set and run a task to clean up old entries.
+            ((ICacheInternal) _metaCache).DoOutInOpExtension<object>(ExtensionId, OpInvalidateSets, w =>
+            {
+                w.WriteString(_cache.Name);
+
+                w.WriteInt(entitySets.Count);
+
+                foreach (var set in entitySets)
+                    w.WriteString(set.Name);
+            }, null);
+        }
+
+        /// <summary>
+        /// Gets the cache with expiry policy according to provided expiration date.
+        /// </summary>
+        /// <returns>Cache with expiry policy.</returns>
+        // ReSharper disable once UnusedParameter.Local
+        private ICache<string, object> GetCacheWithExpiry(TimeSpan absoluteExpiration)
+        {
+            if (absoluteExpiration == TimeSpan.MaxValue)
+                return _cache;
+
+            // Round up to 0.1 of a second so that we share expiry caches
+            var expirySeconds = GetSeconds(absoluteExpiration);
+
+            ICache<string, object> expiryCache;
+
+            if (_expiryCaches.TryGetValue(expirySeconds, out expiryCache))
+                return expiryCache;
+
+            lock (_syncRoot)
+            {
+                if (_expiryCaches.TryGetValue(expirySeconds, out expiryCache))
+                    return expiryCache;
+
+                // Copy on write with size limit
+                _expiryCaches = _expiryCaches.Count > MaxExpiryCaches
+                    ? new Dictionary<long, ICache<string, object>>()
+                    : new Dictionary<long, ICache<string, object>>(_expiryCaches);
+
+                expiryCache =
+                    _cache.WithExpiryPolicy(GetExpiryPolicy(expirySeconds));
+
+                _expiryCaches[expirySeconds] = expiryCache;
+
+                return expiryCache;
+            }
+        }
+
+        /// <summary>
+        /// Gets the expiry policy.
+        /// </summary>
+        private static ExpiryPolicy GetExpiryPolicy(long absoluteSeconds)
+        {
+            var absolute = absoluteSeconds != long.MaxValue
+                ? TimeSpan.FromSeconds((double)absoluteSeconds / 10)
+                : (TimeSpan?) null;
+
+            return new ExpiryPolicy(absolute, null, null);
+        }
+
+        /// <summary>
+        /// Gets the number of tenths of a second in the span (long.MaxValue for infinite).
+        /// </summary>
+        private static long GetSeconds(TimeSpan ts)
+        {
+            if (ts == TimeSpan.MaxValue)
+                return long.MaxValue;
+
+            var seconds = ts.TotalSeconds;
+
+            if (seconds < 0)
+                seconds = 0;
+
+            return (long) (seconds * 10);
+        }
+
+        /// <summary>
+        /// Gets the entity set versions.
+        /// </summary>
+        private IDictionary<string, long> GetEntitySetVersions(ICollection<EntitySetBase> sets)
+        {
+            // LINQ Select allocates less that a new List<> will do.
+            var versions = _metaCache.GetAll(sets.Select(x => x.Name));
+
+            // Some versions may be missing, fill up with 0.
+            foreach (var set in sets)
+            {
+                if (!versions.ContainsKey(set.Name))
+                    versions[set.Name] = 0;
+            }
+
+            Debug.Assert(sets.Count == versions.Count);
+
+            return versions;
+        }
+
+        /// <summary>
+        /// Writes the key.
+        /// </summary>
+        private static void WriteKey(DbCacheKey key, IBinaryRawWriter writer, bool includeNames)
+        {
+            writer.WriteString(key.Key);
+
+            if (key.EntitySetVersions != null)
+            {
+                writer.WriteInt(key.EntitySetVersions.Count);
+
+                // Versions should be in the same order, so we can't iterate over the dictionary.
+                foreach (var entitySet in key.EntitySets)
+                {
+                    writer.WriteLong(key.EntitySetVersions[entitySet.Name]);
+
+                    if (includeNames)
+                        writer.WriteString(entitySet.Name);
+                }
+            }
+            else
+            {
+                writer.WriteInt(-1);
+            }
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCacheKey.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCacheKey.cs
new file mode 100644
index 0000000..7974ba9
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCacheKey.cs
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System.Collections.Generic;
+    using System.Data.Entity.Core.Metadata.Edm;
+    using System.Diagnostics;
+
+    /// <summary>
+    /// Represents a cache key, including dependent entity sets and their versions.
+    /// Entity sets and versions are null for read-only (non-versioned) keys.
+    /// </summary>
+    internal class DbCacheKey
+    {
+        /** Original string key. */
+        private readonly string _key;
+
+        /** Ordered entity sets. */
+        private readonly ICollection<EntitySetBase> _entitySets;
+
+        /** Entity set versions. */
+        private readonly IDictionary<string, long> _entitySetVersions;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DbCacheKey"/> class.
+        /// </summary>
+        public DbCacheKey(string key, ICollection<EntitySetBase> entitySets, 
+            IDictionary<string, long> entitySetVersions)
+        {
+            Debug.Assert(key != null);
+
+            _key = key;
+            _entitySetVersions = entitySetVersions;
+            _entitySets = entitySets;
+        }
+
+        /// <summary>
+        /// Gets the key.
+        /// </summary>
+        public string Key
+        {
+            get { return _key; }
+        }
+
+        /// <summary>
+        /// Gets the entity sets.
+        /// </summary>
+        public ICollection<EntitySetBase> EntitySets
+        {
+            get { return _entitySets; }
+        }
+
+        /// <summary>
+        /// Gets the entity set versions.
+        /// </summary>
+        public IDictionary<string, long> EntitySetVersions
+        {
+            get { return _entitySetVersions; }
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandDefinitionProxy.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandDefinitionProxy.cs
new file mode 100644
index 0000000..7057628
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandDefinitionProxy.cs
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System.Data.Common;
+    using System.Data.Entity.Core.Common;
+    using System.Diagnostics;
+
+    /// <summary>
+    /// Command definition proxy: wraps an underlying <see cref="DbCommandDefinition"/>
+    /// so that created commands are wrapped in <see cref="DbCommandProxy"/>.
+    /// </summary>
+    internal class DbCommandDefinitionProxy : DbCommandDefinition
+    {
+        /** Underlying command definition (never itself a proxy, see ctor). */
+        private readonly DbCommandDefinition _definition;
+
+        /** Caching info passed to created command proxies. */
+        private readonly DbCommandInfo _info;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DbCommandDefinitionProxy"/> class.
+        /// </summary>
+        public DbCommandDefinitionProxy(DbCommandDefinition definition, DbCommandInfo info)
+        {
+            Debug.Assert(definition != null);
+
+            // Unwrap nested proxies so we never proxy a proxy.
+            var proxy = definition as DbCommandDefinitionProxy;
+            _definition = proxy != null ? proxy._definition : definition;
+
+            _info = info;
+        }
+
+        /** <inheritDoc /> */
+        public override DbCommand CreateCommand()
+        {
+            return new DbCommandProxy(_definition.CreateCommand(), _info);
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandInfo.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandInfo.cs
new file mode 100644
index 0000000..7f18170
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandInfo.cs
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System.Collections.Generic;
+    using System.Data.Entity.Core.Common.CommandTrees;
+    using System.Data.Entity.Core.Metadata.Edm;
+    using System.Diagnostics;
+    using System.Linq;
+
+    /// <summary>
+    /// Command info: immutable metadata extracted from a <see cref="DbCommandTree"/>
+    /// (modification flag, affected entity sets) plus the caching collaborators.
+    /// </summary>
+    internal class DbCommandInfo
+    {
+        /** True when the command modifies data (insert/update/delete). */
+        private readonly bool _isModification;
+
+        /** Query result cache. */
+        private readonly DbCache _cache;
+
+        /** Entity sets read or written by the command; null for function (stored procedure) trees. */
+        private readonly EntitySetBase[] _affectedEntitySets;
+
+        /** Caching policy; may be null — TODO confirm against DbProviderServicesProxy, which substitutes a default. */
+        private readonly IDbCachingPolicy _policy;
+
+        /** Transaction handler used to defer invalidation until commit. */
+        private readonly DbTransactionInterceptor _txHandler;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DbCommandInfo"/> class.
+        /// </summary>
+        public DbCommandInfo(DbCommandTree tree, DbCache cache, IDbCachingPolicy policy, DbTransactionInterceptor txHandler)
+        {
+            Debug.Assert(tree != null);
+            Debug.Assert(cache != null);
+            Debug.Assert(txHandler != null);
+
+            var qryTree = tree as DbQueryCommandTree;
+
+            if (qryTree != null)
+            {
+                // Read-only query: collect the entity sets it scans.
+                _isModification = false;
+
+                _affectedEntitySets = GetAffectedEntitySets(qryTree.Query);
+            }
+            else
+            {
+                _isModification = true;
+
+                var modify = tree as DbModificationCommandTree;
+
+                if (modify != null)
+                    _affectedEntitySets = GetAffectedEntitySets(modify.Target.Expression);
+                else
+                    // Functions (stored procedures) are not supported.
+                    Debug.Assert(tree is DbFunctionCommandTree);
+            }
+
+            _cache = cache;
+            _policy = policy;
+            _txHandler = txHandler;
+        }
+
+        /// <summary>
+        /// Gets a value indicating whether this command modifies data,
+        /// as opposed to being a read-only query.
+        /// </summary>
+        public bool IsModification
+        {
+            get { return _isModification; }
+        }
+
+        /// <summary>
+        /// Gets the cache.
+        /// </summary>
+        public DbCache Cache
+        {
+            get { return _cache; }
+        }
+
+        /// <summary>
+        /// Gets the affected entity sets.
+        /// </summary>
+        public ICollection<EntitySetBase> AffectedEntitySets
+        {
+            get { return _affectedEntitySets; }
+        }
+
+        /// <summary>
+        /// Gets the policy.
+        /// </summary>
+        public IDbCachingPolicy Policy
+        {
+            get { return _policy; }
+        }
+
+        /// <summary>
+        /// Gets the tx handler.
+        /// </summary>
+        public DbTransactionInterceptor TxHandler
+        {
+            get { return _txHandler; }
+        }
+
+        /// <summary>
+        /// Gets the affected entity sets by visiting every Scan expression in the tree.
+        /// </summary>
+        private static EntitySetBase[] GetAffectedEntitySets(DbExpression expression)
+        {
+            var visitor = new ScanExpressionVisitor();
+
+            expression.Accept(visitor);
+
+            return visitor.EntitySets.ToArray();
+        }
+
+        /// <summary>
+        /// Visits Scan expressions and collects entity set names.
+        /// </summary>
+        private class ScanExpressionVisitor : BasicCommandTreeVisitor
+        {
+            /** Collected entity sets, in visit order (duplicates possible). */
+            private readonly List<EntitySetBase> _entitySets = new List<EntitySetBase>();
+
+            /// <summary>
+            /// Gets the entity sets.
+            /// </summary>
+            public IEnumerable<EntitySetBase> EntitySets
+            {
+                get { return _entitySets; }
+            }
+
+            /** <inheritdoc /> */
+            public override void Visit(DbScanExpression expression)
+            {
+                _entitySets.Add(expression.Target);
+
+                base.Visit(expression);
+            }
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandProxy.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandProxy.cs
new file mode 100644
index 0000000..e3353d5
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbCommandProxy.cs
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System;
+    using System.Data;
+    using System.Data.Common;
+    using System.Diagnostics;
+    using System.Diagnostics.CodeAnalysis;
+    using System.Text;
+
+    /// <summary>
+    /// Command proxy: delegates all members to the wrapped <see cref="DbCommand"/>,
+    /// caches results of read queries, and invalidates affected cache entries after
+    /// data modifications.
+    /// </summary>
+    internal class DbCommandProxy : DbCommand
+    {
+        /** Wrapped command that performs the actual database work. */
+        private readonly DbCommand _command;
+
+        /** Caching metadata: cache, policy, affected entity sets, tx handler. */
+        private readonly DbCommandInfo _commandInfo;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DbCommandProxy"/> class.
+        /// </summary>
+        public DbCommandProxy(DbCommand command, DbCommandInfo info)
+        {
+            Debug.Assert(command != null);
+            Debug.Assert(info != null);
+
+            _command = command;
+            _commandInfo = info;
+        }
+
+        /// <summary>
+        /// Gets the inner command.
+        /// </summary>
+        [ExcludeFromCodeCoverage]
+        public DbCommand InnerCommand
+        {
+            get { return _command; }
+        }
+
+        /// <summary>
+        /// Gets the command information.
+        /// </summary>
+        [ExcludeFromCodeCoverage]
+        public DbCommandInfo CommandInfo
+        {
+            get { return _commandInfo; }
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override void Prepare()
+        {
+            _command.Prepare();
+        }
+
+        /** <inheritDoc /> */
+        public override string CommandText
+        {
+            get { return _command.CommandText; }
+            set { _command.CommandText = value; }
+        }
+
+        /** <inheritDoc /> */
+        public override int CommandTimeout
+        {
+            get { return _command.CommandTimeout; }
+            set { _command.CommandTimeout = value; }
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override CommandType CommandType
+        {
+            get { return _command.CommandType; }
+            set { _command.CommandType = value; }
+        }
+
+        /** <inheritDoc /> */
+        public override UpdateRowSource UpdatedRowSource
+        {
+            get { return _command.UpdatedRowSource; }
+            set { _command.UpdatedRowSource = value; }
+        }
+
+        /** <inheritDoc /> */
+        protected override DbConnection DbConnection
+        {
+            get { return _command.Connection; }
+            set { _command.Connection = value; }
+        }
+
+        /** <inheritDoc /> */
+        protected override DbParameterCollection DbParameterCollection
+        {
+            get { return _command.Parameters; }
+        }
+
+        /** <inheritDoc /> */
+        protected override DbTransaction DbTransaction
+        {
+            get { return _command.Transaction; }
+            set { _command.Transaction = value; }
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override bool DesignTimeVisible
+        {
+            get { return _command.DesignTimeVisible; }
+            set { _command.DesignTimeVisible = value; }
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override void Cancel()
+        {
+            _command.Cancel();
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        protected override DbParameter CreateDbParameter()
+        {
+            return _command.CreateParameter();
+        }
+
+        /** <inheritDoc /> */
+        protected override DbDataReader ExecuteDbDataReader(CommandBehavior behavior)
+        {
+            if (_commandInfo.IsModification)
+            {
+                // Execute reader, then invalidate cached data.
+                var dbReader = _command.ExecuteReader(behavior);
+
+                InvalidateCache();
+
+                return dbReader;
+            }
+
+            // Reads inside an explicit transaction bypass the cache entirely;
+            // invalidation for the transaction is deferred until commit (see DbTransactionInterceptor).
+            if (Transaction != null)
+            {
+                return _command.ExecuteReader(behavior);
+            }
+
+            var queryInfo = GetQueryInfo();
+            var strategy = _commandInfo.Policy.GetCachingMode(queryInfo);
+            var cacheKey = _commandInfo.Cache.GetCacheKey(GetKey(), _commandInfo.AffectedEntitySets, strategy);
+
+            // Cache hit: replay the materialized result without touching the database.
+            object cachedRes;
+            if (_commandInfo.Cache.GetItem(cacheKey, out cachedRes))
+                return ((DataReaderResult) cachedRes).CreateReader();
+
+            var reader = _command.ExecuteReader(behavior);
+
+            if (reader.RecordsAffected > 0)
+                return reader;  // Queries that modify anything are never cached.
+
+            // Check if cacheable.
+            if (!_commandInfo.Policy.CanBeCached(queryInfo))
+                return reader;
+
+            // Read into memory. Consumes the live reader; from here on only
+            // in-memory replay readers are handed out.
+            var res = new DataReaderResult(reader);
+
+            // Check if specific row count is cacheable.
+            if (!_commandInfo.Policy.CanBeCached(queryInfo, res.RowCount))
+                return res.CreateReader();
+
+            PutResultToCache(cacheKey, res, queryInfo);
+
+            return res.CreateReader();
+        }
+
+        /// <summary>
+        /// Invalidates the cache entries for all entity sets affected by this command —
+        /// immediately when there is no transaction, otherwise deferred until commit.
+        /// </summary>
+        private void InvalidateCache()
+        {
+            _commandInfo.TxHandler.InvalidateCache(_commandInfo.AffectedEntitySets, Transaction);
+        }
+
+        /** <inheritDoc /> */
+        public override int ExecuteNonQuery()
+        {
+            var res = _command.ExecuteNonQuery();
+
+            // Invalidate AFTER updating the data.
+            if (_commandInfo.IsModification)
+            {
+                InvalidateCache();
+            }
+
+            return res;
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override object ExecuteScalar()
+        {
+            // This method is never used by EntityFramework.
+            // Even EntityCommand.ExecuteScalar goes to ExecuteDbDataReader.
+            return _command.ExecuteScalar();
+        }
+
+        /// <summary>
+        /// Puts the result to cache.
+        /// </summary>
+        private void PutResultToCache(DbCacheKey key, object result, DbQueryInfo queryInfo)
+        {
+            // NOTE(review): Policy is null-checked here, but ExecuteDbDataReader dereferences
+            // it unconditionally (GetCachingMode/CanBeCached) — confirm whether Policy can
+            // actually be null at this point and make the two paths consistent.
+            var expiration = _commandInfo.Policy != null
+                ? _commandInfo.Policy.GetExpirationTimeout(queryInfo)
+                : TimeSpan.MaxValue;
+
+            _commandInfo.Cache.PutItem(key, result, expiration);
+        }
+
+        /// <summary>
+        /// Gets the cache key: database name, command text, and every parameter name/value pair.
+        /// Throws <see cref="NotSupportedException"/> when CommandText is empty.
+        /// </summary>
+        private string GetKey()
+        {
+            if (string.IsNullOrEmpty(CommandText))
+                throw new NotSupportedException("Ignite Entity Framework Caching " +
+                                                "requires non-empty DbCommand.CommandText.");
+
+            var sb = new StringBuilder();
+
+            sb.AppendFormat("{0}:{1}|", Connection.Database, CommandText);
+
+            foreach (DbParameter param in Parameters)
+                sb.AppendFormat("{0}={1},", param.ParameterName, param.Value);
+
+            return sb.ToString();
+        }
+
+        /// <summary>
+        /// Gets the query information.
+        /// </summary>
+        private DbQueryInfo GetQueryInfo()
+        {
+            return new DbQueryInfo(_commandInfo.AffectedEntitySets, CommandText, DbParameterCollection);
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbProviderServicesProxy.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbProviderServicesProxy.cs
new file mode 100644
index 0000000..8e01295
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbProviderServicesProxy.cs
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma warning disable 618, 672
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System;
+    using System.Collections.Generic;
+    using System.Data.Common;
+    using System.Data.Entity.Core.Common;
+    using System.Data.Entity.Core.Common.CommandTrees;
+    using System.Data.Entity.Core.Metadata.Edm;
+    using System.Data.Entity.Spatial;
+    using System.Diagnostics;
+    using System.Diagnostics.CodeAnalysis;
+
+    /// <summary>
+    /// DbProviderServices proxy which substitutes custom commands.
+    /// </summary>
+    internal class DbProviderServicesProxy : DbProviderServices
+    {
+        /** Fallback policy used when the caller provides none. */
+        private static readonly DbCachingPolicy DefaultPolicy = new DbCachingPolicy();
+
+        /** Caching policy; never null (DefaultPolicy is substituted). */
+        private readonly IDbCachingPolicy _policy;
+        
+        /** Wrapped provider services (never a proxy itself). */
+        private readonly DbProviderServices _services;
+        
+        /** Query result cache. */
+        private readonly DbCache _cache;
+
+        /** Transaction handler that defers cache invalidation until commit. */
+        private readonly DbTransactionInterceptor _txHandler;
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DbProviderServicesProxy"/> class.
+        /// </summary>
+        /// <param name="services">The services.</param>
+        /// <param name="policy">The policy.</param>
+        /// <param name="cache">The cache.</param>
+        /// <param name="txHandler">Transaction handler.</param>
+        public DbProviderServicesProxy(DbProviderServices services, IDbCachingPolicy policy, DbCache cache, 
+            DbTransactionInterceptor txHandler)
+        {
+            Debug.Assert(services != null);
+            Debug.Assert(cache != null);
+            Debug.Assert(txHandler != null);
+
+            // Unwrap nested proxies so we never chain proxy over proxy.
+            var proxy = services as DbProviderServicesProxy;
+            _services = proxy != null ? proxy._services : services;
+
+            _policy = policy ?? DefaultPolicy;
+            _cache = cache;
+            _txHandler = txHandler;
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override DbCommandDefinition CreateCommandDefinition(DbCommand prototype)
+        {
+            var proxy = prototype as DbCommandProxy;
+
+            // Non-proxied prototype: delegate unchanged.
+            if (proxy == null)
+                return _services.CreateCommandDefinition(prototype);
+
+            // Re-wrap, preserving the existing command metadata.
+            return new DbCommandDefinitionProxy(_services.CreateCommandDefinition(proxy.InnerCommand), 
+                proxy.CommandInfo);
+        }
+
+        /** <inheritDoc /> */
+        protected override DbCommandDefinition CreateDbCommandDefinition(DbProviderManifest providerManifest, 
+            DbCommandTree commandTree)
+        {
+            return new DbCommandDefinitionProxy(_services.CreateCommandDefinition(providerManifest, commandTree), 
+                new DbCommandInfo(commandTree, _cache, _policy, _txHandler));
+        }
+
+        /** <inheritDoc /> */
+        protected override string GetDbProviderManifestToken(DbConnection connection)
+        {
+            return _services.GetProviderManifestToken(connection);
+        }
+
+        /** <inheritDoc /> */
+        protected override DbProviderManifest GetDbProviderManifest(string manifestToken)
+        {
+            return _services.GetProviderManifest(manifestToken);
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override void RegisterInfoMessageHandler(DbConnection connection, Action<string> handler)
+        {
+            _services.RegisterInfoMessageHandler(connection, handler);
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        protected override DbSpatialDataReader GetDbSpatialDataReader(DbDataReader fromReader, string manifestToken)
+        {
+            return _services.GetSpatialDataReader(fromReader, manifestToken);
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        protected override DbSpatialServices DbGetSpatialServices(string manifestToken)
+        {
+            return _services.GetSpatialServices(manifestToken);
+        }
+
+        /** <inheritDoc /> */
+        protected override void SetDbParameterValue(DbParameter parameter, TypeUsage parameterType, object value)
+        {
+            _services.SetParameterValue(parameter, parameterType, value);
+        }
+
+        /** <inheritDoc /> */
+        protected override string DbCreateDatabaseScript(string providerManifestToken, StoreItemCollection storeItemCollection)
+        {
+            return _services.CreateDatabaseScript(providerManifestToken, storeItemCollection);
+        }
+
+        /** <inheritDoc /> */
+        protected override void DbCreateDatabase(DbConnection connection, int? commandTimeout, StoreItemCollection storeItemCollection)
+        {
+            _services.CreateDatabase(connection, commandTimeout, storeItemCollection);
+        }
+
+        /** <inheritDoc /> */
+        protected override bool DbDatabaseExists(DbConnection connection, int? commandTimeout, StoreItemCollection storeItemCollection)
+        {
+            return _services.DatabaseExists(connection, commandTimeout, storeItemCollection);
+        }
+
+        /** <inheritDoc /> */
+        protected override void DbDeleteDatabase(DbConnection connection, int? commandTimeout, StoreItemCollection storeItemCollection)
+        {
+            _services.DeleteDatabase(connection, commandTimeout, storeItemCollection);
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override object GetService(Type type, object key)
+        {
+            return _services.GetService(type, key);
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public override IEnumerable<object> GetServices(Type type, object key)
+        {
+            return _services.GetServices(type, key);
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbTransactionInterceptor.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbTransactionInterceptor.cs
new file mode 100644
index 0000000..601868e
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Impl/DbTransactionInterceptor.cs
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.EntityFramework.Impl
+{
+    using System.Collections.Concurrent;
+    using System.Collections.Generic;
+    using System.Data;
+    using System.Data.Common;
+    using System.Data.Entity.Core.Metadata.Edm;
+    using System.Data.Entity.Infrastructure.Interception;
+    using System.Diagnostics.CodeAnalysis;
+
+    /// <summary>
+    /// Intercepts transaction events. Collects entity sets modified within a transaction
+    /// and invalidates the corresponding cache entries when the transaction commits.
+    /// </summary>
+    internal class DbTransactionInterceptor : IDbTransactionInterceptor
+    {
+        /** Cache. */
+        private readonly DbCache _cache;
+
+        /** Map from tx to dependent sets. HashSet because same sets can be affected multiple times within a tx. */
+        private readonly ConcurrentDictionary<DbTransaction, HashSet<EntitySetBase>> _entitySets 
+            = new ConcurrentDictionary<DbTransaction, HashSet<EntitySetBase>>();
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="DbTransactionInterceptor"/> class.
+        /// </summary>
+        /// <param name="cache">The cache.</param>
+        public DbTransactionInterceptor(DbCache cache)
+        {
+            _cache = cache;
+        }
+
+        /** <inheritDoc /> */
+        public void InvalidateCache(ICollection<EntitySetBase> entitySets, DbTransaction transaction)
+        {
+            if (transaction == null)
+            {
+                // Invalidate immediately.
+                _cache.InvalidateSets(entitySets);
+            }
+            else
+            {
+                // Postpone until commit.
+                // NOTE(review): the per-transaction HashSet is mutated without locking;
+                // assumes a DbTransaction is only ever used from one thread — confirm.
+                var sets = _entitySets.GetOrAdd(transaction, _ => new HashSet<EntitySetBase>());
+
+                foreach (var set in entitySets)
+                    sets.Add(set);
+            }
+        }
+
+        /** <inheritDoc /> */
+        public void ConnectionGetting(DbTransaction transaction, DbTransactionInterceptionContext<DbConnection> interceptionContext)
+        {
+            // No-op
+        }
+
+        /** <inheritDoc /> */
+        public void ConnectionGot(DbTransaction transaction, DbTransactionInterceptionContext<DbConnection> interceptionContext)
+        {
+            // No-op
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public void IsolationLevelGetting(DbTransaction transaction, DbTransactionInterceptionContext<IsolationLevel> interceptionContext)
+        {
+            // No-op
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public void IsolationLevelGot(DbTransaction transaction, DbTransactionInterceptionContext<IsolationLevel> interceptionContext)
+        {
+            // No-op
+        }
+
+        /** <inheritDoc /> */
+        public void Committing(DbTransaction transaction, DbTransactionInterceptionContext interceptionContext)
+        {
+            // No-op
+        }
+
+        /** <inheritDoc /> */
+        public void Committed(DbTransaction transaction, DbTransactionInterceptionContext interceptionContext)
+        {
+            // Invalidate every set touched within the tx; the map entry itself
+            // is removed later, in Disposed.
+            HashSet<EntitySetBase> entitySets;
+            if (_entitySets.TryGetValue(transaction, out entitySets))
+                _cache.InvalidateSets(entitySets);
+        }
+
+        /** <inheritDoc /> */
+        public void Disposing(DbTransaction transaction, DbTransactionInterceptionContext interceptionContext)
+        {
+            // No-op
+        }
+
+        /** <inheritDoc /> */
+        public void Disposed(DbTransaction transaction, DbTransactionInterceptionContext interceptionContext)
+        {
+            // Drop pending invalidations; after a rollback they are discarded here
+            // without ever invalidating the cache.
+            HashSet<EntitySetBase> val;
+            _entitySets.TryRemove(transaction, out val);
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public void RollingBack(DbTransaction transaction, DbTransactionInterceptionContext interceptionContext)
+        {
+            // No-op
+        }
+
+        /** <inheritDoc /> */
+        [ExcludeFromCodeCoverage]
+        public void RolledBack(DbTransaction transaction, DbTransactionInterceptionContext interceptionContext)
+        {
+            // No-op
+        }
+    }
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..7ce4c5f
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/Properties/AssemblyInfo.cs
@@ -0,0 +1,41 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+using System;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General assembly metadata.
+[assembly: AssemblyTitle("Apache.Ignite.EntityFramework")]
+[assembly: AssemblyDescription("Apache Ignite.NET EntityFramework integration")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("Apache Software Foundation")]
+[assembly: AssemblyProduct("Apache Ignite.NET")]
+[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// COM interop settings.
+[assembly: ComVisible(false)]
+[assembly: Guid("c558518a-c1a0-4224-aaa9-a8688474b4dc")]
+
+// Version numbers; presumably kept in sync with other Ignite.NET assemblies by the build — TODO confirm.
+[assembly: AssemblyVersion("1.8.0.14218")]
+[assembly: AssemblyFileVersion("1.8.0.14218")]
+[assembly: AssemblyInformationalVersion("1.8.0")]
+
+[assembly: CLSCompliant(true)]
+
+// Expose internals to the test assembly (strong-name public key required).
+[assembly: InternalsVisibleTo("Apache.Ignite.EntityFramework.Tests, PublicKey=00240000048000009400000006020000002400005253413100040000010001005f45ca91396d3bb682c38d96bdc6e9ac5855a2b8f7dd7434493c278ceb75cae29d452714a376221e5bfc26dfc7dadcdbe9d0a8bb04b1945f6c326089481fc65da5fa8fc728fa9dde5fa2e1599f89678c6b1b38c59d5deef7d012eced64941d5d065aff987ec0196f5b352213d5c04b982647d7fb3bfb2496b890afc5ef1391b0")]
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.EntityFramework/packages.config b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/packages.config
new file mode 100644
index 0000000..c623cae
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.EntityFramework/packages.config
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<packages>
+  <package id="EntityFramework" version="6.1.3" targetFramework="net40" />
+</packages>
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.FxCop b/modules/platforms/dotnet/Apache.Ignite.FxCop
index 5ada560..109f59f 100644
--- a/modules/platforms/dotnet/Apache.Ignite.FxCop
+++ b/modules/platforms/dotnet/Apache.Ignite.FxCop
@@ -23,6 +23,8 @@
  <Targets>
   <Target Name="$(ProjectDir)/Apache.Ignite.Core/bin/Debug/Apache.Ignite.Core.dll" Analyze="True" AnalyzeAllChildren="True" />
   <Target Name="$(ProjectDir)/Apache.Ignite.Linq/bin/Debug/Apache.Ignite.Linq.dll" Analyze="True" AnalyzeAllChildren="True" />
+  <Target Name="$(ProjectDir)/Apache.Ignite.AspNet/bin/Debug/Apache.Ignite.AspNet.dll" Analyze="True" AnalyzeAllChildren="True" />
+  <Target Name="$(ProjectDir)/Apache.Ignite.NLog/bin/Debug/Apache.Ignite.NLog.dll" Analyze="True" AnalyzeAllChildren="True" />
  </Targets>
  <Rules>
   <RuleFiles>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj b/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj
index e935f3f..72e050c 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.csproj
@@ -44,11 +44,6 @@
     </Reference>
     <Reference Include="System" />
     <Reference Include="System.Core" />
-    <Reference Include="System.Xml.Linq" />
-    <Reference Include="System.Data.DataSetExtensions" />
-    <Reference Include="Microsoft.CSharp" />
-    <Reference Include="System.Data" />
-    <Reference Include="System.Xml" />
   </ItemGroup>
   <ItemGroup>
     <Compile Include="CacheExtensions.cs" />
diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.nuspec b/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.nuspec
index 088ca13..330ed29 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.nuspec
+++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Apache.Ignite.Linq.nuspec
@@ -46,9 +46,6 @@
             
 More info: https://apacheignite-net.readme.io/
         </description>
-        <summary>
-            LINQ Provider for Apache Ignite
-        </summary>
         <releaseNotes></releaseNotes>
         <copyright>Copyright 2016</copyright>
         <tags>Apache Ignite In-Memory Distributed Computing SQL NoSQL LINQ Grid Map Reduce Cache linqpad-samples</tags>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/ICacheQueryable.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/ICacheQueryable.cs
index 684f746..ef641e2 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Linq/ICacheQueryable.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Linq/ICacheQueryable.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 namespace Apache.Ignite.Linq
 {
diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs
index 3d48f41..8dfddc7 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheFieldsQueryExecutor.cs
@@ -75,38 +75,6 @@
             _enforceJoinOrder = enforceJoinOrder;
         }
 
-        /// <summary>
-        /// Gets the local flag.
-        /// </summary>
-        public bool Local
-        {
-            get { return _local; }
-        }
-
-        /// <summary>
-        /// Gets the size of the page.
-        /// </summary>
-        public int PageSize
-        {
-            get { return _pageSize; }
-        }
-
-        /// <summary>
-        /// Gets a value indicating whether distributed joins are enabled.
-        /// </summary>
-        public bool EnableDistributedJoins
-        {
-            get { return _enableDistributedJoins; }
-        }
-
-        /// <summary>
-        /// Gets a value indicating whether join order should be enforced.
-        /// </summary>
-        public bool EnforceJoinOrder
-        {
-            get { return _enforceJoinOrder; }
-        }
-
         /** <inheritdoc /> */
         public T ExecuteScalar<T>(QueryModel queryModel)
         {
@@ -282,7 +250,7 @@
         /// <summary>
         /// Gets the fields query.
         /// </summary>
-        private SqlFieldsQuery GetFieldsQuery(string text, object[] args)
+        internal SqlFieldsQuery GetFieldsQuery(string text, object[] args)
         {
             return new SqlFieldsQuery(text, _local, args)
             {
diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryableBase.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryableBase.cs
index 5dc40ab..21a7850 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryableBase.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryableBase.cs
@@ -66,12 +66,7 @@
             var data = GetQueryData();
             var executor = CacheQueryProvider.Executor;
 
-            return new SqlFieldsQuery(data.QueryText, executor.Local, data.Parameters.ToArray())
-            {
-                EnableDistributedJoins = executor.EnableDistributedJoins,
-                EnforceJoinOrder = executor.EnforceJoinOrder,
-                PageSize = executor.PageSize
-            };
+            return executor.GetFieldsQuery(data.QueryText, data.Parameters.ToArray());
         }
 
         /** <inheritdoc /> */
@@ -143,7 +138,8 @@
         /// </returns>
         public override string ToString()
         {
-            return GetQueryData().ToString();
+            return string.Format("CacheQueryable [CacheName={0}, TableName={1}, Query={2}]", 
+                CacheName, TableName, GetFieldsQuery());
         }
     }
 }
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Properties/AssemblyInfo.cs
index d47bef9..a115145 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Linq/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Properties/AssemblyInfo.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 using System;
 using System.Reflection;
@@ -24,7 +24,7 @@
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
@@ -37,4 +37,4 @@
 [assembly: AssemblyFileVersion("1.8.0.14218")]
 [assembly: AssemblyInformationalVersion("1.8.0")]
 
-[assembly: CLSCompliant(true)]
+[assembly: CLSCompliant(true)]
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.NLog/Apache.Ignite.NLog.csproj b/modules/platforms/dotnet/Apache.Ignite.NLog/Apache.Ignite.NLog.csproj
index 147dc37..9fc6ffc 100644
--- a/modules/platforms/dotnet/Apache.Ignite.NLog/Apache.Ignite.NLog.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.NLog/Apache.Ignite.NLog.csproj
@@ -42,11 +42,6 @@
     </Reference>
     <Reference Include="System" />
     <Reference Include="System.Core" />
-    <Reference Include="System.Xml.Linq" />
-    <Reference Include="System.Data.DataSetExtensions" />
-    <Reference Include="Microsoft.CSharp" />
-    <Reference Include="System.Data" />
-    <Reference Include="System.Xml" />
   </ItemGroup>
   <ItemGroup>
     <Compile Include="IgniteNLogLogger.cs" />
diff --git a/modules/platforms/dotnet/Apache.Ignite.NLog/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.NLog/Properties/AssemblyInfo.cs
index 50220d2..04214b4 100644
--- a/modules/platforms/dotnet/Apache.Ignite.NLog/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.NLog/Properties/AssemblyInfo.cs
@@ -33,8 +33,8 @@
 // The following GUID is for the ID of the typelib if this project is exposed to COM
 [assembly: Guid("c6b58e4a-a2e9-4554-ad02-68ce6da5cfb7")]
 
-[assembly: AssemblyVersion("1.8.0.14218")]
-[assembly: AssemblyFileVersion("1.8.0.14218")]
+[assembly: AssemblyVersion("1.8.0.13244")]
+[assembly: AssemblyFileVersion("1.8.0.13244")]
 [assembly: AssemblyInformationalVersion("1.8.0")]
 
 [assembly: CLSCompliant(true)]
diff --git a/modules/platforms/dotnet/Apache.Ignite.NLog/packages.config b/modules/platforms/dotnet/Apache.Ignite.NLog/packages.config
index 15659a4..074fd42 100644
--- a/modules/platforms/dotnet/Apache.Ignite.NLog/packages.config
+++ b/modules/platforms/dotnet/Apache.Ignite.NLog/packages.config
@@ -1,4 +1,5 @@
 <?xml version="1.0" encoding="utf-8"?>
+
 <!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
@@ -15,6 +16,7 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
+
 <packages>
   <package id="NLog" version="4.3.7" targetFramework="net40" />
 </packages>
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.sln b/modules/platforms/dotnet/Apache.Ignite.sln
index de7cf19..fed0821 100644
--- a/modules/platforms/dotnet/Apache.Ignite.sln
+++ b/modules/platforms/dotnet/Apache.Ignite.sln
@@ -42,6 +42,10 @@
 EndProject
 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Apache.Ignite.Log4Net", "Apache.Ignite.log4net\Apache.Ignite.Log4Net.csproj", "{6F82D669-382E-4435-8092-68C4440146D8}"
 EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Apache.Ignite.EntityFramework", "Apache.Ignite.EntityFramework\Apache.Ignite.EntityFramework.csproj", "{C558518A-C1A0-4224-AAA9-A8688474B4DC}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Apache.Ignite.EntityFramework.Tests", "Apache.Ignite.EntityFramework.Tests\Apache.Ignite.EntityFramework.Tests.csproj", "{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}"
+EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
 		Debug|Any CPU = Debug|Any CPU
@@ -216,6 +220,30 @@
 		{6F82D669-382E-4435-8092-68C4440146D8}.Release|x64.Build.0 = Release|Any CPU
 		{6F82D669-382E-4435-8092-68C4440146D8}.Release|x86.ActiveCfg = Release|Any CPU
 		{6F82D669-382E-4435-8092-68C4440146D8}.Release|x86.Build.0 = Release|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Debug|x64.ActiveCfg = Debug|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Debug|x64.Build.0 = Debug|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Debug|x86.ActiveCfg = Debug|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Debug|x86.Build.0 = Debug|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Release|Any CPU.Build.0 = Release|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Release|x64.ActiveCfg = Release|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Release|x64.Build.0 = Release|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Release|x86.ActiveCfg = Release|Any CPU
+		{C558518A-C1A0-4224-AAA9-A8688474B4DC}.Release|x86.Build.0 = Release|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Debug|x64.ActiveCfg = Debug|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Debug|x64.Build.0 = Debug|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Debug|x86.ActiveCfg = Debug|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Debug|x86.Build.0 = Debug|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Release|Any CPU.Build.0 = Release|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Release|x64.ActiveCfg = Release|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Release|x64.Build.0 = Release|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Release|x86.ActiveCfg = Release|Any CPU
+		{CDA5700E-78F3-4A9E-A9B0-704CBE94651C}.Release|x86.Build.0 = Release|Any CPU
 	EndGlobalSection
 	GlobalSection(SolutionProperties) = preSolution
 		HideSolutionNode = FALSE
diff --git a/modules/platforms/dotnet/Apache.Ignite/Apache.Ignite.csproj b/modules/platforms/dotnet/Apache.Ignite/Apache.Ignite.csproj
index 747e1a5..e98ddd1 100644
--- a/modules/platforms/dotnet/Apache.Ignite/Apache.Ignite.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite/Apache.Ignite.csproj
@@ -36,11 +36,6 @@
     <Reference Include="System.Configuration.Install" />
     <Reference Include="System.Core" />
     <Reference Include="System.ServiceProcess" />
-    <Reference Include="System.Xml.Linq" />
-    <Reference Include="System.Data.DataSetExtensions" />
-    <Reference Include="Microsoft.CSharp" />
-    <Reference Include="System.Data" />
-    <Reference Include="System.Xml" />
   </ItemGroup>
   <ItemGroup>
     <Compile Include="Config\AppSettingsConfigurator.cs" />
diff --git a/modules/platforms/dotnet/Apache.Ignite/IgniteRunner.cs b/modules/platforms/dotnet/Apache.Ignite/IgniteRunner.cs
index 68a8445..5703aa6 100644
--- a/modules/platforms/dotnet/Apache.Ignite/IgniteRunner.cs
+++ b/modules/platforms/dotnet/Apache.Ignite/IgniteRunner.cs
@@ -126,17 +126,19 @@
         {
             Console.WriteLine("Usage: Apache.Ignite.exe [/install] [/uninstall] [-options]");
             Console.WriteLine("");
-            Console.WriteLine("\t/install [-options]    installs Ignite Windows service with provided options");
-            Console.WriteLine("\t/uninstall             uninstalls Ignite Windows service");
+            Console.WriteLine("\t/install [-options]    installs Ignite Windows service with provided options.");
+            Console.WriteLine("\t/uninstall             uninstalls Ignite Windows service.");
             Console.WriteLine("");
             Console.WriteLine("Options:");
-            Console.WriteLine("\t-IgniteHome            path to Ignite installation directory (if not provided IGNITE_HOME environment variable is used)");
-            Console.WriteLine("\t-springConfigUrl       path to spring configuration file (if not provided \"config/default-config.xml\" is used)");
-            Console.WriteLine("\t-jvmDllPath            path to JVM library jvm.dll (if not provided JAVA_HOME environment variable is used)");
-            Console.WriteLine("\t-jvmClasspath          classpath passed to JVM (enlist additional jar files here)");
-            Console.WriteLine("\t-suppressWarnings      wether to print warnings");
-            Console.WriteLine("\t-J<javaOption>         JVM options passed to created JVM");
-            Console.WriteLine("\t-assembly=userLib.dll  additional .Net assemblies");
+            Console.WriteLine("\t-IgniteHome            path to Ignite installation directory (if not provided IGNITE_HOME environment variable is used).");
+            Console.WriteLine("\t-ConfigSectionName     name of the IgniteConfigurationSection in app.config to use.");
+            Console.WriteLine("\t-ConfigFileName        path to the app.config file (if not provided Apache.Ignite.exe.config is used).");
+            Console.WriteLine("\t-springConfigUrl       path to Spring configuration file.");
+            Console.WriteLine("\t-jvmDllPath            path to JVM library jvm.dll (if not provided JAVA_HOME environment variable is used).");
+            Console.WriteLine("\t-jvmClasspath          classpath passed to JVM (enlist additional jar files here).");
+            Console.WriteLine("\t-suppressWarnings      whether to print warnings.");
+            Console.WriteLine("\t-J<javaOption>         JVM options passed to created JVM.");
+            Console.WriteLine("\t-assembly=userLib.dll  additional .NET assemblies to be loaded.");
             Console.WriteLine("\t-jvmInitialMemoryMB    Initial Java heap size, in megabytes. Maps to -Xms Java parameter. Defaults to 512.");
             Console.WriteLine("\t-jvmMaxMemoryMB        Maximum Java heap size, in megabytes. Maps to -Xmx Java parameter. Defaults to 1024.");
             Console.WriteLine("");
diff --git a/modules/platforms/dotnet/Apache.Ignite/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite/Properties/AssemblyInfo.cs
index 82e27b1..7127e3c 100644
--- a/modules/platforms/dotnet/Apache.Ignite/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite/Properties/AssemblyInfo.cs
@@ -1,19 +1,19 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 using System.Reflection;
 using System.Runtime.InteropServices;
@@ -23,7 +23,7 @@
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
@@ -33,4 +33,4 @@
 
 [assembly: AssemblyVersion("1.8.0.14218")]
 [assembly: AssemblyFileVersion("1.8.0.14218")]
-[assembly: AssemblyInformationalVersion("1.8.0")]
+[assembly: AssemblyInformationalVersion("1.8.0")]
\ No newline at end of file
diff --git a/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Properties/AssemblyInfo.cs
index 4f55039..42fcb29 100644
--- a/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Properties/AssemblyInfo.cs
@@ -1,29 +1,29 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
-using System.Reflection;
-using System.Runtime.InteropServices;
+using System.Reflection;
+using System.Runtime.InteropServices;
 
 [assembly: AssemblyTitle("Apache Ignite.NET Examples")]
 [assembly: AssemblyDescription("")]
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
@@ -33,4 +33,4 @@
 
 [assembly: AssemblyVersion("1.8.0.14218")]
 [assembly: AssemblyFileVersion("1.8.0.14218")]
-[assembly: AssemblyInformationalVersion("1.8.0")]
+[assembly: AssemblyInformationalVersion("1.8.0")]
\ No newline at end of file
diff --git a/modules/platforms/dotnet/examples/Apache.Ignite.ExamplesDll/Binary/Employee.cs b/modules/platforms/dotnet/examples/Apache.Ignite.ExamplesDll/Binary/Employee.cs
index b746bdf..d4637e3 100644
--- a/modules/platforms/dotnet/examples/Apache.Ignite.ExamplesDll/Binary/Employee.cs
+++ b/modules/platforms/dotnet/examples/Apache.Ignite.ExamplesDll/Binary/Employee.cs
@@ -34,7 +34,7 @@
         /// <param name="address">Address.</param>
         /// <param name="departments">Departments.</param>
         /// <param name="organizationId">The organization identifier.</param>
-        public Employee(string name, long salary, Address address, ICollection<string> departments, 
+        public Employee(string name, long salary, Address address, ICollection<string> departments,
             int organizationId = 0)
         {
             Name = name;
diff --git a/modules/platforms/dotnet/examples/Apache.Ignite.ExamplesDll/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/examples/Apache.Ignite.ExamplesDll/Properties/AssemblyInfo.cs
index 471e7e9..90c2974 100644
--- a/modules/platforms/dotnet/examples/Apache.Ignite.ExamplesDll/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/examples/Apache.Ignite.ExamplesDll/Properties/AssemblyInfo.cs
@@ -1,29 +1,29 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 using System.Reflection;
-using System.Runtime.InteropServices;
+using System.Runtime.InteropServices;
 
 [assembly: AssemblyTitle("Apache Ignite.NET Examples Library")]
 [assembly: AssemblyDescription("")]
 [assembly: AssemblyConfiguration("")]
 [assembly: AssemblyCompany("Apache Software Foundation")]
 [assembly: AssemblyProduct("Apache Ignite.NET")]
-[assembly: AssemblyCopyright("Copyright ©  2015")]
+[assembly: AssemblyCopyright("Copyright 2016")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
 
@@ -33,4 +33,4 @@
 
 [assembly: AssemblyVersion("1.8.0.14218")]
 [assembly: AssemblyFileVersion("1.8.0.14218")]
-[assembly: AssemblyInformationalVersion("1.8.0")]
+[assembly: AssemblyInformationalVersion("1.8.0")]
\ No newline at end of file
diff --git a/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java b/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java
index 7f79c0e..37a4b74 100644
--- a/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java
+++ b/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java
@@ -302,6 +302,13 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> createCaches(Collection<CacheConfiguration> cacheCfgs) {
+        checkIgnite();
+
+        return g.createCaches(cacheCfgs);
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> getOrCreateCache(CacheConfiguration<K, V> cacheCfg, NearCacheConfiguration<K, V> nearCfg) {
         checkIgnite();
 
@@ -330,6 +337,13 @@
     }
 
     /** {@inheritDoc} */
+    @Override public Collection<IgniteCache> getOrCreateCaches(Collection<CacheConfiguration> cacheCfgs) {
+        checkIgnite();
+
+        return g.getOrCreateCaches(cacheCfgs);
+    }
+
+    /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createCache(String cacheName) {
         checkIgnite();
 
@@ -351,6 +365,13 @@
     }
 
     /** {@inheritDoc} */
+    @Override public void destroyCaches(Collection<String> cacheNames) {
+        checkIgnite();
+
+        g.destroyCaches(cacheNames);
+    }
+
+    /** {@inheritDoc} */
     @Override public IgniteTransactions transactions() {
         checkIgnite();
 
@@ -463,7 +484,7 @@
     {
         checkIgnite();
 
-        return g.reentrantLock(name, failoverSafe, create, fair);
+        return g.reentrantLock(name, failoverSafe, fair, create);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/storm/README.txt b/modules/storm/README.txt
index eb20f25..684fdb5 100644
--- a/modules/storm/README.txt
+++ b/modules/storm/README.txt
@@ -5,7 +5,7 @@
 
 Starting data transfer to Ignite cache can be done with the following steps.
 
-1. Import Ignite Kafka Streamer Module In Maven Project
+1. Import Ignite Storm Streamer Module In Maven Project
 
 If you are using Maven to manage dependencies of your project, you can add Storm module
 dependency like this (replace '${ignite.version}' with actual Ignite version you are
diff --git a/modules/storm/pom.xml b/modules/storm/pom.xml
index 17d92cd..19165be 100644
--- a/modules/storm/pom.xml
+++ b/modules/storm/pom.xml
@@ -35,7 +35,7 @@
     <url>http://ignite.apache.org</url>
 
     <properties>
-        <storm.version>0.10.0</storm.version>
+        <storm.version>1.0.2</storm.version>
     </properties>
 
     <dependencies>
diff --git a/modules/storm/src/main/java/org/apache/ignite/stream/storm/StormStreamer.java b/modules/storm/src/main/java/org/apache/ignite/stream/storm/StormStreamer.java
index bdaec0b..1cba8ad 100644
--- a/modules/storm/src/main/java/org/apache/ignite/stream/storm/StormStreamer.java
+++ b/modules/storm/src/main/java/org/apache/ignite/stream/storm/StormStreamer.java
@@ -17,11 +17,6 @@
 
 package org.apache.ignite.stream.storm;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
 import java.util.Map;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteDataStreamer;
@@ -30,6 +25,11 @@
 import org.apache.ignite.Ignition;
 import org.apache.ignite.internal.util.typedef.internal.A;
 import org.apache.ignite.stream.StreamAdapter;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * Apache Storm streamer implemented as a Storm bolt.
diff --git a/modules/storm/src/test/java/org/apache/ignite/stream/storm/StormIgniteStreamerSelfTest.java b/modules/storm/src/test/java/org/apache/ignite/stream/storm/StormIgniteStreamerSelfTest.java
index 0ce4c6e..e2547b1 100644
--- a/modules/storm/src/test/java/org/apache/ignite/stream/storm/StormIgniteStreamerSelfTest.java
+++ b/modules/storm/src/test/java/org/apache/ignite/stream/storm/StormIgniteStreamerSelfTest.java
@@ -17,16 +17,6 @@
 
 package org.apache.ignite.stream.storm;
 
-import backtype.storm.Config;
-import backtype.storm.ILocalCluster;
-import backtype.storm.Testing;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.testing.CompleteTopologyParam;
-import backtype.storm.testing.MkClusterParam;
-import backtype.storm.testing.MockedSources;
-import backtype.storm.testing.TestJob;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Values;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Map;
@@ -41,6 +31,16 @@
 import org.apache.ignite.events.CacheEvent;
 import org.apache.ignite.lang.IgniteBiPredicate;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.storm.Config;
+import org.apache.storm.ILocalCluster;
+import org.apache.storm.Testing;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.testing.CompleteTopologyParam;
+import org.apache.storm.testing.MkClusterParam;
+import org.apache.storm.testing.MockedSources;
+import org.apache.storm.testing.TestJob;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Values;
 import org.jetbrains.annotations.NotNull;
 
 import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_PUT;
diff --git a/modules/storm/src/test/java/org/apache/ignite/stream/storm/TestStormSpout.java b/modules/storm/src/test/java/org/apache/ignite/stream/storm/TestStormSpout.java
index a006ca7..117b44b 100644
--- a/modules/storm/src/test/java/org/apache/ignite/stream/storm/TestStormSpout.java
+++ b/modules/storm/src/test/java/org/apache/ignite/stream/storm/TestStormSpout.java
@@ -17,13 +17,13 @@
 
 package org.apache.ignite.stream.storm;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
 import java.util.Map;
 import java.util.TreeMap;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
 
 /**
  * Testing Storm spout.
diff --git a/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/scanners/http/UriDeploymentHttpScanner.java b/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/scanners/http/UriDeploymentHttpScanner.java
index 48bfd7f..bb7260d 100644
--- a/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/scanners/http/UriDeploymentHttpScanner.java
+++ b/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/scanners/http/UriDeploymentHttpScanner.java
@@ -343,11 +343,11 @@
                     catch (IOException e) {
                         if (!scanCtx.isCancelled()) {
                             if (X.hasCause(e, ConnectException.class)) {
-                                LT.warn(scanCtx.getLogger(), e, "Failed to connect to HTTP server " +
+                                LT.error(scanCtx.getLogger(), e, "Failed to connect to HTTP server " +
                                     "(connection refused): " + U.hidePassword(url));
                             }
                             else if (X.hasCause(e, UnknownHostException.class)) {
-                                LT.warn(scanCtx.getLogger(), e, "Failed to connect to HTTP server " +
+                                LT.error(scanCtx.getLogger(), e, "Failed to connect to HTTP server " +
                                     "(host is unknown): " + U.hidePassword(url));
                             }
                             else
@@ -404,11 +404,11 @@
             catch (IOException e) {
                 if (!scanCtx.isCancelled()) {
                     if (X.hasCause(e, ConnectException.class)) {
-                        LT.warn(scanCtx.getLogger(), e, "Failed to connect to HTTP server (connection refused): " +
+                        LT.error(scanCtx.getLogger(), e, "Failed to connect to HTTP server (connection refused): " +
                             U.hidePassword(url.toString()));
                     }
                     else if (X.hasCause(e, UnknownHostException.class)) {
-                        LT.warn(scanCtx.getLogger(), e, "Failed to connect to HTTP server (host is unknown): " +
+                        LT.error(scanCtx.getLogger(), e, "Failed to connect to HTTP server (host is unknown): " +
                             U.hidePassword(url.toString()));
                     }
                     else
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java
index eed4450..a261b98 100644
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java
@@ -87,58 +87,64 @@
 
         assert appCtx != null;
 
-        for (CacheConfiguration cc : c.getCacheConfiguration()) {
-            // IgniteNode can not run in CLIENT_ONLY mode,
-            // except the case when it's used inside IgniteAbstractBenchmark.
-            boolean cl = args.isClientOnly() && (args.isNearCache() || clientMode);
+        CacheConfiguration[] ccfgs = c.getCacheConfiguration();
 
-            if (cl)
-                c.setClientMode(true);
+        if (ccfgs != null) {
+            for (CacheConfiguration cc : ccfgs) {
+                // IgniteNode can not run in CLIENT_ONLY mode,
+                // except the case when it's used inside IgniteAbstractBenchmark.
+                boolean cl = args.isClientOnly() && (args.isNearCache() || clientMode);
 
-            if (args.isNearCache()) {
-                NearCacheConfiguration nearCfg = new NearCacheConfiguration();
+                if (cl)
+                    c.setClientMode(true);
 
-                if (args.getNearCacheSize() != 0)
-                    nearCfg.setNearEvictionPolicy(new LruEvictionPolicy(args.getNearCacheSize()));
+                if (args.isNearCache()) {
+                    NearCacheConfiguration nearCfg = new NearCacheConfiguration();
 
-                cc.setNearConfiguration(nearCfg);
+                    if (args.getNearCacheSize() != 0)
+                        nearCfg.setNearEvictionPolicy(new LruEvictionPolicy(args.getNearCacheSize()));
+
+                    cc.setNearConfiguration(nearCfg);
+                }
+
+                cc.setWriteSynchronizationMode(args.syncMode());
+
+                if (args.orderMode() != null)
+                    cc.setAtomicWriteOrderMode(args.orderMode());
+
+                cc.setBackups(args.backups());
+
+                if (args.restTcpPort() != 0) {
+                    ConnectorConfiguration ccc = new ConnectorConfiguration();
+
+                    ccc.setPort(args.restTcpPort());
+
+                    if (args.restTcpHost() != null)
+                        ccc.setHost(args.restTcpHost());
+
+                    c.setConnectorConfiguration(ccc);
+                }
+
+                if (args.isOffHeap()) {
+                    cc.setOffHeapMaxMemory(0);
+
+                    if (args.isOffheapValues())
+                        cc.setMemoryMode(OFFHEAP_VALUES);
+                    else
+                        cc.setEvictionPolicy(new LruEvictionPolicy(50000));
+                }
+
+                cc.setReadThrough(args.isStoreEnabled());
+
+                cc.setWriteThrough(args.isStoreEnabled());
+
+                cc.setWriteBehindEnabled(args.isWriteBehind());
+
+                BenchmarkUtils.println(cfg, "Cache configured with the following parameters: " + cc);
             }
-
-            cc.setWriteSynchronizationMode(args.syncMode());
-
-            if (args.orderMode() != null)
-                cc.setAtomicWriteOrderMode(args.orderMode());
-
-            cc.setBackups(args.backups());
-
-            if (args.restTcpPort() != 0) {
-                ConnectorConfiguration ccc = new ConnectorConfiguration();
-
-                ccc.setPort(args.restTcpPort());
-
-                if (args.restTcpHost() != null)
-                    ccc.setHost(args.restTcpHost());
-
-                c.setConnectorConfiguration(ccc);
-            }
-
-            if (args.isOffHeap()) {
-                cc.setOffHeapMaxMemory(0);
-
-                if (args.isOffheapValues())
-                    cc.setMemoryMode(OFFHEAP_VALUES);
-                else
-                    cc.setEvictionPolicy(new LruEvictionPolicy(50000));
-            }
-
-            cc.setReadThrough(args.isStoreEnabled());
-
-            cc.setWriteThrough(args.isStoreEnabled());
-
-            cc.setWriteBehindEnabled(args.isWriteBehind());
-
-            BenchmarkUtils.println(cfg, "Cache configured with the following parameters: " + cc);
         }
+        else
+            BenchmarkUtils.println(cfg, "There are no caches configured");
 
         TransactionConfiguration tc = c.getTransactionConfiguration();
 
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgniteAtomicSequenceBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgniteAtomicSequenceBenchmark.java
new file mode 100644
index 0000000..e961439
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgniteAtomicSequenceBenchmark.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.cache;
+
+import java.util.Map;
+import org.apache.ignite.IgniteAtomicSequence;
+import org.apache.ignite.yardstick.IgniteAbstractBenchmark;
+import org.yardstickframework.BenchmarkConfiguration;
+
+/**
+ * Ignite atomic sequence benchmark.
+ */
+public class IgniteAtomicSequenceBenchmark extends IgniteAbstractBenchmark {
+    /** Cache. */
+    private IgniteAtomicSequence seq;
+
+    /** {@inheritDoc} */
+    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
+        super.setUp(cfg);
+
+        seq = ignite().atomicSequence("benchSequence", 0, true);
+
+        seq.batchSize(args.batch());
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        seq.incrementAndGet();
+
+        return true;
+    }
+}
diff --git a/parent/pom.xml b/parent/pom.xml
index f766437..682efa2 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -65,7 +65,7 @@
         <easymock.version>3.4</easymock.version>
         <ezmorph.bundle.version>1.0.6_1</ezmorph.bundle.version>
         <ezmorph.version>1.0.6</ezmorph.version>
-        <flume.ng.version>1.6.0</flume.ng.version>
+        <flume.ng.version>1.7.0</flume.ng.version>
         <guava.retrying.version>2.0.0</guava.retrying.version>
         <guava.version>18.0</guava.version>
         <guava14.version>14.0.1</guava14.version>
@@ -86,9 +86,7 @@
         <jsonlib.bundle.version>2.4_1</jsonlib.bundle.version>
         <jsonlib.version>2.4</jsonlib.version>
         <jtidy.version>r938</jtidy.version>
-        <kafka.bundle.version>0.9.0.0_1</kafka.bundle.version>
-        <kafka.clients.bundle.version>0.9.0.0_1</kafka.clients.bundle.version>
-        <kafka.version>0.9.0.0</kafka.version>
+        <kafka.version>0.10.0.1</kafka.version>
         <karaf.version>4.0.2</karaf.version>
         <lucene.bundle.version>3.5.0_1</lucene.bundle.version>
         <lucene.version>3.5.0</lucene.version>