IGNITE-22136 Updated Apache Ignite 3 doc with new features (#3678)

diff --git a/docs/_data/toc.yaml b/docs/_data/toc.yaml
index 326b355..6918ef0 100644
--- a/docs/_data/toc.yaml
+++ b/docs/_data/toc.yaml
@@ -14,82 +14,139 @@
 # limitations under the License.
 - title: About Apache Ignite 3
   url: index
-- title: Getting Started Guide
-  url: quick-start/getting-started-guide
 - title: Installation
-  url: installation
+  url: installation/installing-using-zip
   items:
     - title: Installing Using ZIP Archive
       url: installation/installing-using-zip
-    - title: Installing Using Docker
-      url: installation/installing-using-docker
     - title: Installing DEB or RPM package
       url: installation/deb-rpm
-- title: Ignite CLI Tool
+    - title: Installing Using Docker
+      url: installation/installing-using-docker
+- title: Getting Started
+  url: quick-start/getting-started-guide
+- title: Embedded Mode
+  url: quick-start/embedded-mode
+- title: Ignite CLI Tool
   url: ignite-cli-tool
-- title: Working with SQL
+- title: Developers Guide
+  url: developers-guide/table-api
   items:
-    - title: Introduction
-      url: sql/calcite-based-sql-engine
-    - title: JDBC Driver
-      url: sql/jdbc-driver
-    - title: ODBC Driver
-      url: sql/odbc/odbc-driver
+    - title: Table API
+      url: developers-guide/table-api
+    - title: Clients
+      url: developers-guide/clients/overview
       items:
+        - title: Overview
+          url: developers-guide/clients/overview
+        - title: Java Clients
+          url: developers-guide/clients/java
+        - title: .NET Clients
+          url: developers-guide/clients/dotnet
+        - title: C++ Clients
+          url: developers-guide/clients/cpp
+        - title: .NET LINQ Queries
+          url: developers-guide/clients/linq
+    - title: Working with SQL
+      url: developers-guide/sql/calcite-based-sql-engine
+      items:
+        - title: Introduction
+          url: developers-guide/sql/calcite-based-sql-engine
+        - title: JDBC Driver
+          url: developers-guide/sql/jdbc-driver
+        - title: System Views
+          url: developers-guide/sql/system-views
         - title: ODBC Driver
-          url: sql/odbc/odbc-driver
-        - title: Connection String
-          url: sql/odbc/connection-string
-        - title: Querying and Modifying Data
-          url: sql/odbc/querying-modifying-data
-    - title: Java API
-      url: sql/java
+          url: developers-guide/sql/odbc/odbc-driver
+          items:
+            - title: ODBC Driver
+              url: developers-guide/sql/odbc/odbc-driver
+            - title: Connection String
+              url: developers-guide/sql/odbc/connection-string
+            - title: Querying and Modifying Data
+              url: developers-guide/sql/odbc/querying-modifying-data
+            - title: Standard Conformance
+              url: developers-guide/sql/odbc/specification
+    - title: Tables from Java Classes
+      url: developers-guide/java-to-tables
+    - title: Distributed Computing
+      url: developers-guide/compute/compute
+    - title: Transactions
+      url: developers-guide/transactions
+    - title: Data Streaming
+      url: developers-guide/data-streamer
+    - title: Code Deployment
+      url: developers-guide/code-deployment/code-deployment
+    - title: Cache Storage
+      url: developers-guide/cache
+    - title: REST API
+      url: developers-guide/rest/rest-api
+      items:
+        - title: Overview
+          url: developers-guide/rest/rest-api
+- title: Administrator’s Guide
+  url: administrators-guide/config/config
+  items:
+    - title: Ignite Configuration
+      url: administrators-guide/config/config
+      items:
+        - title: Node Configuration Parameters
+          url: administrators-guide/config/node-config
+        - title: Cluster Configuration Parameters
+          url: administrators-guide/config/cluster-config
+        - title: Storage Configuration
+          url: administrators-guide/config/storage/persistent
+          items:
+            - title: Native Persistent Storage
+              url: administrators-guide/config/storage/persistent
+            - title: RocksDB Persistent Storage
+              url: administrators-guide/config/storage/rocksdb
+            - title: Volatile Storage
+              url: administrators-guide/config/storage/volatile
+            - title: Table Configuration
+              url: administrators-guide/config/storage/table-configuration
+        - title: Memory Quotas Configuration
+          url: administrators-guide/config/memory-quotas
+    - title: Security and Authentication
+      url: administrators-guide/security/ssl-tls
+      items:
+        - title: SSL/TLS
+          url: administrators-guide/security/ssl-tls
+        - title: Authentication
+          url: administrators-guide/security/authentication
+        - title: JWT Authentication
+          url: administrators-guide/security/jwt
+        - title: User Permissions and Roles
+          url: administrators-guide/security/permissions
+    - title: Metrics and Monitoring
+      url: administrators-guide/metrics/configuring-metrics
+      items:
+        - title: Configuring Metrics
+          url: administrators-guide/metrics/configuring-metrics
+        - title: Metrics List
+          url: administrators-guide/metrics/metrics-list
+    - title: Handling Exceptions
+      url: administrators-guide/handling-exceptions
 - title: SQL Reference
+  url: sql-reference/ddl
   items:
     - title: Data Definition Language (DDL)
       url: sql-reference/ddl
     - title: Data Manipulation Language (DML)
       url: sql-reference/dml
+    - title: Transactions
+      url: sql-reference/transactions
     - title: Distribution Zones
       url: sql-reference/distribution-zones
     - title: Supported Operators and Functions
       url: sql-reference/operators-and-functions
-- title: REST
-  items:
-    - title: Overview
-      url: rest/rest-api
-    - title: Reference
-      url: rest/reference
-- title: Clients & Drivers
-  items:
-    - title: Ignite Clients
-      url: thin-clients/index
-    - title: .NET LINQ Queries
-      url: thin-clients/linq
-- title: Distributed Computing
-  url: compute/compute
-- title: Data Region Configuration
-  url: config/data-region
-- title: Storage Configuration
-  url: storage/persistent
-  items:
-    - title: Native Persistent Storage
-      url: storage/persistent
-    - title: RocksDB Persistent Storage
-      url: storage/rocksdb
-    - title: Volatile Storage
-      url: storage/volatile
-- title: Binary Client Protocol
-  url: binary-protocol
-- title: SSL/TLS
-  url: ssl-tls
-- title: Data Rebalancing
-  url: rebalance
-- title: Performing Transactions
-  url: transactions/performing-transactions
-- title: Table Views
-  url: table-views
-- title: Handling Exceptions
-  url: handling-exceptions
+    - title: Grammar Reference
+      url: sql-reference/grammar-reference
+    - title: Data Types
+      url: sql-reference/data-types
+- title: SQL Performance Tuning
+  url: sql-tuning
+- title: General Configuration Tips
+  url: general-tips
 - title: Glossary
   url: glossary/glossary
diff --git a/docs/_docs/administrators-guide/config/cli-config.adoc b/docs/_docs/administrators-guide/config/cli-config.adoc
new file mode 100644
index 0000000..fc000d5
--- /dev/null
+++ b/docs/_docs/administrators-guide/config/cli-config.adoc
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= CLI Configuration Parameters
+
+== Config Parameters
+
+----
+ignite.jdbc.key-store.path=
+ignite.cluster-endpoint-url=http://127.0.1.1:10300
+ignite.jdbc.client-auth=
+ignite.rest.key-store.password=
+ignite.jdbc.key-store.password=
+ignite.rest.trust-store.path=
+ignite.jdbc.trust-store.password=
+ignite.auth.basic.username=
+ignite.jdbc-url=jdbc:ignite:thin://127.0.0.1:10800
+ignite.rest.key-store.path=
+ignite.rest.trust-store.password=
+ignite.jdbc.trust-store.path=
+ignite.auth.basic.password=
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+
+|ignite.jdbc.key-store.path||Path to the keystore used for JDBC connections.
+|ignite.cluster-endpoint-url|http://127.0.1.1:10300|URL of the REST endpoint of the cluster the CLI connects to.
+|ignite.jdbc.client-auth||Defines whether client authentication is used for JDBC connections.
+|ignite.rest.key-store.password||Password for the keystore used for REST connections.
+|ignite.jdbc.key-store.password||Password for the keystore used for JDBC connections.
+|ignite.rest.trust-store.path||Path to the truststore used for REST connections.
+|ignite.jdbc.trust-store.password||Password for the truststore used for JDBC connections.
+|ignite.auth.basic.username||Username for basic authentication.
+|ignite.jdbc-url|jdbc:ignite:thin://127.0.0.1:10800|JDBC URL used by the CLI to execute SQL statements.
+|ignite.rest.key-store.path||Path to the keystore used for REST connections.
+|ignite.rest.trust-store.password||Password for the truststore used for REST connections.
+|ignite.jdbc.trust-store.path||Path to the truststore used for JDBC connections.
+|ignite.auth.basic.password||Password for basic authentication.
+|======
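+
+These parameters are stored in the CLI configuration file and can also be changed from the CLI itself. A minimal sketch, assuming the `cli config set` command is available in the Ignite 3 CLI:
+
+[source, bash]
+----
+cli config set ignite.jdbc-url=jdbc:ignite:thin://127.0.0.1:10800
+----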
+
+== Configuration Profiles
\ No newline at end of file
diff --git a/docs/_docs/administrators-guide/config/cluster-config.adoc b/docs/_docs/administrators-guide/config/cluster-config.adoc
new file mode 100644
index 0000000..3e24359
--- /dev/null
+++ b/docs/_docs/administrators-guide/config/cluster-config.adoc
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Cluster Configuration Parameters
+
+Ignite 3 cluster configuration is shared across the whole cluster. Regardless of which node you apply the configuration on, it is propagated to all nodes in the cluster.
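+
+All of the parameters described below can be viewed and changed with the link:ignite-cli-tool[CLI tool], for example (the value is illustrative):
+
+[source, bash]
+----
+cluster config show
+cluster config update gc.threads=8
+----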
+
+== Garbage Collection
+
+[source, json]
+----
+  "gc" : {
+    "batchSize" : 5,
+    "lowWatermark" : {
+      "dataAvailabilityTime" : 600000,
+      "updateFrequency" : 300000
+    },
+    "threads" : 16
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|batchSize|5|Garbage collection batch size.
+|lowWatermark.dataAvailabilityTime|600000|How long outdated data versions remain available, in milliseconds.
+|lowWatermark.updateFrequency|300000|The frequency of low watermark updates, in milliseconds.
+|threads|16|The number of threads used by the garbage collector.
+|======
+
+== Metastorage Configuration
+
+[source, json]
+----
+  "metaStorage" : {
+    "idleSyncTimeInterval" : 500
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|idleSyncTimeInterval|500|Interval between metastorage synchronization operations when the cluster is idle, in milliseconds.
+
+|======
+
+== Metrics Configuration
+
+[source, json]
+----
+  "metrics" : {
+    "exporters" : [ ]
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|exporters||The list of metric exporters currently used. For more information, see link:administrators-guide/metrics/configuring-metrics[Configuring Metrics].
+|======
+
+== Replication Configuration
+
+[source, json]
+----
+  "replication" : {
+    "idleSafeTimePropagationDuration" : 1000
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|idleSafeTimePropagationDuration|1000|How long it takes for safe time to be propagated to other nodes in the cluster when there are no updates, in milliseconds.
+
+|======
+
+== Schema Sync Configuration
+
+[source, json]
+----
+  "schemaSync" : {
+    "delayDuration" : 1000
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|delayDuration|1000|The delay between a schema update and its synchronization across the cluster, in milliseconds.
+
+|======
+
+== SQL Configuration
+
+[source, json]
+----
+  "sql" : {
+    "statementMemoryQuota" : "10K"
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|statementMemoryQuota|10K|The amount of memory that can be used by a single SQL statement.
+
+|======
+
+== Transactions Configuration
+
+[source, json]
+----
+  "transaction" : {
+    "abandonedCheckTs" : 5000
+  }
+}
+----
+
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|abandonedCheckTs|5000|The amount of time after which the transaction is considered abandoned, in milliseconds.
+|======
\ No newline at end of file
diff --git a/docs/_docs/administrators-guide/config/config.adoc b/docs/_docs/administrators-guide/config/config.adoc
new file mode 100644
index 0000000..a579112
--- /dev/null
+++ b/docs/_docs/administrators-guide/config/config.adoc
@@ -0,0 +1,53 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Cluster and Node Configuration
+
+//NOTE:UPDATE WITH SPECIFIC PARAMS FOR PUBLIC RELEASE
+
+In Ignite 3, configuration is managed by using the link:ignite-cli-tool[CLI utility] and is stored in HOCON format. You can view and change parameters at cluster startup and at any point during cluster runtime.
+
+== Updating Configuration from CLI
+
+=== Getting Cluster and Node Configuration
+
+You can get the cluster configuration by using the `cluster config show` command, and the configuration of the node you are connected to by using the `node config show` command.
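+
+For example, to view the current node and cluster configuration:
+
+[source, bash]
+----
+node config show
+cluster config show
+----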
+
+=== Updating Cluster and Node Configuration
+
+You can update the configuration by using the `cluster config update` and `node config update` commands and passing a valid HOCON string as the parameter. Below are some examples of updating the configuration:
+
+==== Updating a single parameter
+
+Updating a single parameter is done by specifying the parameter and assigning it a new value:
+
+----
+node config update network.shutdownTimeout=20000
+----
+
+==== Updating Multiple Parameters
+
+When updating multiple parameters at once, pass a valid HOCON configuration to Ignite. The CLI tool will then parse it and apply all required changes at the same time.
+
+----
+cluster config update "{security.authentication.providers:[{name:basic,password:admin_password,type:basic,username:admin_user,roles:[admin]}],security.authentication.enabled:true}"
+----
+
+== Configuration Files
+
+When an Ignite node starts, it reads its starting configuration from the `etc/ignite-config.conf` file. You can edit this file to consistently start the node with the specified configuration.
+
+Cluster configuration is stored on the cluster nodes and is automatically shared with all nodes in the cluster. Use the CLI tool to manage this configuration.
+
+Ignite also uses a number of environment variables to define properties not related to node or cluster operations. When a node starts, it loads these variables from the `etc/vars.env` file. Edit this file to configure the locations of work-related folders, JVM properties, and additional JVM arguments through the `IGNITE3_EXTRA_JVM_ARGS` variable.
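+
+For example, a minimal `etc/vars.env` could pass additional JVM arguments to the node; the values below are illustrative and not defaults:
+
+[source, bash]
+----
+# Illustrative example: extra JVM arguments appended to the node JVM options.
+IGNITE3_EXTRA_JVM_ARGS="-Xms2g -Xmx4g"
+----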
diff --git a/docs/_docs/administrators-guide/config/memory-quotas.adoc b/docs/_docs/administrators-guide/config/memory-quotas.adoc
new file mode 100644
index 0000000..8d1b59a
--- /dev/null
+++ b/docs/_docs/administrators-guide/config/memory-quotas.adoc
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Memory Quotas Configuration
+
+Memory quotas can be used to prevent Ignite nodes from running out of memory when executing SQL queries that return large result sets. A query loads objects from caches into memory. If there are too many objects, the objects are too large, or multiple large queries are executed on the node at the same time, the JVM can run out of memory. When memory quotas are configured, the SQL engine imposes a limit on the heap memory available to queries.
+
+Quotas are configured for each node individually. You can have different limits on different nodes, depending on the amount of RAM available to the nodes. Additionally, you can set a cluster-wide limit on how much of the memory quota can be used up by a single SQL query.
+
+
+Memory quota size can be configured in:
+
+- Kilobytes - append 'K' or 'k', for example: 10k, 400K;
+- Megabytes - append 'M' or 'm', for example: 60m, 420M;
+- Gigabytes - append 'G' or 'g', for example: 7g, 2G;
+- Percent of heap - append the '%' sign, for example: 45%, 80%.
+
+== Node-Wide Memory Quota
+
+By default, a quota for SQL queries is set to 60% of the heap memory available to the node. You can change this limit by setting the `sql.nodeMemoryQuota` node configuration property. To disable the memory quota, set it to 0. You can use the CLI tool to set it:
+
+[source, bash]
+----
+node config update --node-url http://localhost:10300 {sql.nodeMemoryQuota:"1000M"}
+----
+
+If the node memory quota is exceeded, the query is interrupted and the `SQL query ran out of memory: Node quota was exceeded` error is returned.
+
+== Query-Wide Memory Quota
+
+By default, each individual query can use the entire memory quota. This may be undesirable in environments that run multiple large queries in parallel. You can use the `sql.statementMemoryQuota` cluster configuration property to limit the amount of memory that can be allocated to a single query.
+
+[source, bash]
+----
+cluster config update --cluster-endpoint-url http://localhost:10300 {sql.statementMemoryQuota:"10M"}
+----
+
+If the statement memory quota is exceeded, the query is interrupted and the `SQL query ran out of memory: Statement quota was exceeded` error is returned.
\ No newline at end of file
diff --git a/docs/_docs/administrators-guide/config/node-config.adoc b/docs/_docs/administrators-guide/config/node-config.adoc
new file mode 100644
index 0000000..06682e1
--- /dev/null
+++ b/docs/_docs/administrators-guide/config/node-config.adoc
@@ -0,0 +1,339 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Node Configuration Parameters
+
+Node configuration is individual for each node and is not shared across the whole cluster.
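+
+Node configuration parameters can be changed with the link:ignite-cli-tool[CLI tool]. For example, the following command updates one of the compute parameters described below (the value is illustrative):
+
+[source, bash]
+----
+node config update compute.threadPoolSize=32
+----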
+
+== Storage Configuration
+
+See the link:administrators-guide/config/storage/persistent[Persistent Storage] section for information on storage configuration settings.
+
+== Client Connector
+
+See the link:developers-guide/clients/overview[Clients] section for information on configuring the client connector.
+
+== Cluster Configuration
+
+[source, json]
+----
+  "cluster" : {
+    "networkInvokeTimeout" : 500
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|networkInvokeTimeout|500|Timeout for network requests within the cluster.
+|======
+
+
+== Compute Configuration
+
+[source, json]
+----
+  "compute" : {
+    "queueMaxSize" : 2147483647,
+    "statesLifetimeMillis" : 60000,
+    "threadPoolSize" : 64,
+    "threadPoolStopTimeoutMillis" : 10000
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|queueMaxSize|2147483647|Maximum number of compute tasks in queue.
+|statesLifetimeMillis|60000|The lifetime of job states after the job finishes, in milliseconds.
+|threadPoolSize|64|The number of threads available to compute jobs.
+|threadPoolStopTimeoutMillis|10000| Job thread pool stop timeout, in milliseconds.
+|======
+
+
+== Code Deployment Configuration
+
+[source, json]
+----
+  "deployment" : {
+    "deploymentLocation" : "deployment"
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|deploymentLocation|deployment|Relative path to the folder in the working directory where the content of all deployment units is stored.
+|======
+
+
+== Expiration Configuration
+
+[source, json]
+----
+  "expiration" : {
+    "batchSize" : 1000,
+    "checkFrequency" : 600000,
+    "parallelismLevel" : 1
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|batchSize|1000|The number of items that can be expired at once.
+|checkFrequency|600000|How often the data is checked for expiration, in milliseconds.
+|parallelismLevel|1|The number of threads used for data expiry.
+|======
+
+
+== Network Configuration
+
+[source, json]
+----
+  "network" : {
+    "fileTransfer" : {
+      "chunkSize" : 1048576,
+      "maxConcurrentRequests" : 4,
+      "responseTimeout" : 10000,
+      "threadPoolSize" : 8
+    },
+    "inbound" : {
+      "soBacklog" : 128,
+      "soKeepAlive" : true,
+      "soLinger" : 0,
+      "soReuseAddr" : true,
+      "tcpNoDelay" : true
+    },
+    "membership" : {
+      "failurePingInterval" : 1000,
+      "membershipSyncInterval" : 30000,
+      "scaleCube" : {
+        "failurePingRequestMembers" : 3,
+        "gossipInterval" : 200,
+        "gossipRepeatMult" : 3,
+        "membershipSuspicionMultiplier" : 5,
+        "metadataTimeout" : 3000
+      }
+    },
+    "nodeFinder" : {
+      "netClusterNodes" : [ "localhost:3344" ],
+      "type" : "STATIC"
+    },
+    "outbound" : {
+      "soKeepAlive" : true,
+      "soLinger" : 0,
+      "tcpNoDelay" : true
+    },
+    "port" : 3344,
+    "shutdownQuietPeriod" : 0,
+    "shutdownTimeout" : 15000,
+    "ssl" : {
+      "ciphers" : "",
+      "clientAuth" : "none",
+      "enabled" : false,
+      "keyStore" : {
+        "password" : "********",
+        "path" : "",
+        "type" : "PKCS12"
+      },
+      "trustStore" : {
+        "password" : "********",
+        "path" : "",
+        "type" : "PKCS12"
+      }
+    }
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|fileTransfer||File transfer configuration.
+|fileTransfer.chunkSize|1048576|Chunk size in bytes.
+|fileTransfer.maxConcurrentRequests|4|Maximum number of concurrent requests.
+|fileTransfer.responseTimeout|10000|Node response timeout during file transfer.
+|fileTransfer.threadPoolSize|8|File sender thread pool size.
+|inbound||Server socket configuration. See the link:https://man7.org/linux/man-pages/man7/tcp.7.html[TCP documentation] and link:https://man7.org/linux/man-pages/man7/socket.7.html[socket documentation] for more information.
+|inbound.soBacklog|128| The size of the backlog.
+|inbound.soKeepAlive|true| Defines whether keep-alive packets are allowed.
+|inbound.soLinger|0| Defines how long a closed socket should linger.
+|inbound.soReuseAddr|true| Defines whether the address can be reused.
+|inbound.tcpNoDelay|true| Defines whether the TCP no delay option is used.
+|membership||Node membership configuration.
+|membership.failurePingInterval|1000| Failure detector ping interval.
+|membership.membershipSyncInterval|30000|Periodic membership data synchronization interval.
+|membership.scaleCube|| ScaleCube-specific configuration.
+|scaleCube.failurePingRequestMembers|3|Number of members that are randomly selected by a cluster node for an indirect ping request.
+|scaleCube.gossipInterval|200|link:https://en.wikipedia.org/wiki/Gossip_protocol[Gossip] spreading interval.
+|scaleCube.gossipRepeatMult|3|Gossip repeat multiplier.
+|scaleCube.membershipSuspicionMultiplier|5|The multiplier that is used to calculate the timeout after which the node is considered dead.
+|scaleCube.metadataTimeout|3000|The timeout on metadata update operation, in milliseconds.
+|nodeFinder||Configuration for how the node finds other nodes in the cluster.
+|nodeFinder.netClusterNodes|localhost:3344|Addresses of all nodes in the cluster in a host:port format.
+|nodeFinder.type|STATIC|Node finder type.
+|outbound||Outbound request configuration.
+|outbound.soKeepAlive|true| Defines whether keep-alive packets are allowed.
+|outbound.soLinger|0|Defines how long a closed socket should linger.
+|outbound.tcpNoDelay|true| Defines whether the TCP no delay option is used.
+|port|3344|Node port.
+|shutdownQuietPeriod|0| The period during node shutdown when Ignite ensures that no tasks are submitted before the node shuts itself down. If a task is submitted during this period, it is guaranteed to be accepted.
+|shutdownTimeout|15000|The maximum amount of time before the node shuts down, regardless of whether new network messages were submitted during `shutdownQuietPeriod`.
+|ssl.ciphers||Explicitly set node SSL ciphers.
+|ssl.clientAuth|none|Client authentication mode used by the node, if any.
+|ssl.enabled|false|Defines if SSL is enabled for the node.
+|ssl.keyStore|| SSL keystore configuration.
+|keyStore.password||Keystore password.
+|keyStore.path||Path to the keystore.
+|keyStore.type|PKCS12|Keystore type.
+|ssl.port||Port used for SSL connections.
+|ssl.trustStore||SSL truststore configuration.
+|trustStore.password||Truststore password.
+|trustStore.path||Path to the truststore.
+|trustStore.type|PKCS12|Truststore type.
+
+
+|======
+
+
+== Node Attributes
+
+[source, json]
+----
+  "nodeAttributes" : {
+    "nodeAttributes" : [ ]
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|nodeAttributes||The list of node attributes. Node attributes can be used to filter nodes when configuring distribution zones.
+|======
+
+
+== Raft Configuration
+
+[source, json]
+----
+  "raft" : {
+    "fsync" : true,
+    "responseTimeout" : 3000,
+    "retryDelay" : 200,
+    "retryTimeout" : 10000,
+    "rpcInstallSnapshotTimeout" : 300000,
+    "volatileRaft" : {
+      "logStorage" : {
+        "name" : "unlimited"
+      }
+    }
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|fsync|true|Defines whether fsync is used when writing data to the Raft log.
+|responseTimeout|3000|Raft group response timeout.
+|retryDelay|200|The delay before the request is retried.
+|retryTimeout|10000|The timeout after which the request is considered timed out.
+|rpcInstallSnapshotTimeout|300000|The maximum allowed duration between sending an InstallSnapshot request and receiving a response to it. During this time, the snapshot must be fully transferred to the recipient and installed.
+|volatileRaft.logStorage.name|unlimited|The name of the log storage used by the node.
+
+|======
+
+
+== REST Configuration
+
+[source, json]
+----
+  "rest" : {
+    "dualProtocol" : false,
+    "httpToHttpsRedirection" : false,
+    "port" : 10300,
+    "ssl" : {
+      "ciphers" : "",
+      "clientAuth" : "none",
+      "enabled" : false,
+      "keyStore" : {
+        "password" : "********",
+        "path" : "",
+        "type" : "PKCS12"
+      },
+      "port" : 10400,
+      "trustStore" : {
+        "password" : "********",
+        "path" : "",
+        "type" : "PKCS12"
+      }
+    }
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+|dualProtocol|false|Defines if both HTTP and HTTPS protocols are used by the endpoint.
+|httpToHttpsRedirection|false|Defines if requests to the HTTP endpoint are redirected to HTTPS.
+|port|10300|The port of the node's REST endpoint.
+|ssl.ciphers||Explicitly set node SSL ciphers.
+|ssl.clientAuth|none|Client authentication mode used by the node, if any.
+|ssl.enabled|false|Defines if SSL is enabled for the node.
+|ssl.keyStore|| SSL keystore configuration.
+|keyStore.password||Keystore password.
+|keyStore.path||Path to the keystore.
+|keyStore.type|PKCS12|Keystore type.
+|ssl.port|10400|Port used for SSL connections.
+|ssl.trustStore||SSL truststore configuration.
+|trustStore.password||Truststore password.
+|trustStore.path||Path to the truststore.
+|trustStore.type|PKCS12|Truststore type.
+|======
+
+
+== SQL Configuration
+
+[source, json]
+----
+  "sql" : {
+    "nodeMemoryQuota" : "1000g"
+  },
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+
+|nodeMemoryQuota|1000g| Node-wide limit for memory that can be used for SQL queries.
+
+|======
+
+
+== Storage Profiles Configuration
+
+[source, json]
+----
+  "storageProfiles" : {
+    "storageProfiles" : [ ]
+  }
+}
+----
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+
+|storageProfiles||The list of available storage profiles.
+
+|======
\ No newline at end of file
diff --git a/docs/_docs/administrators-guide/config/storage/persistent.adoc b/docs/_docs/administrators-guide/config/storage/persistent.adoc
new file mode 100644
index 0000000..e85bf63
--- /dev/null
+++ b/docs/_docs/administrators-guide/config/storage/persistent.adoc
@@ -0,0 +1,78 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Persistent Storage
+
+== Overview
+
+Apache Ignite Persistence is designed to provide quick and responsive persistent storage.
+When persistent storage is used, Ignite stores all the data on disk and loads as much data as it can into RAM for processing.
+
+When persistence is enabled, Ignite stores each partition in a separate file on disk. In addition to data partitions, Ignite stores indexes and metadata.
+
+== Checkpointing
+
+_Checkpointing_ is the process of copying dirty pages from RAM to partition files on disk. A dirty page is a page that was updated in RAM but was not written to the respective partition file.
+
+After a checkpoint is created, all changes are persisted to disk and will be available if the node crashes and is restarted.
+
+Checkpointing is designed to ensure durability of data and recovery in case of a node failure. It also helps to use disk space frugally by keeping pages in the most up-to-date state on disk.
+
+
+== Configuration Properties
+
+The following table describes some properties of persistent data storage:
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+|Property|Default|Description
+
+|name|| The name of the data region.
+|size|`256 * 1024 * 1024`| Sets the space allocated to the data region, in bytes.
+|replacementMode|`CLOCK`| Sets the page replacement algorithm.
+|pageSize|16384| The size of pages in the storage, in bytes.
+|memoryAllocator.type|unsafe|Memory allocator configuration. Uses `sun.misc.Unsafe` to improve performance. Currently, no other options are available.
+|===
+
+The table below describes checkpoint configuration:
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+|Property|Default|Description
+|checkpointDelayMillis|200| Delay before starting a checkpoint after receiving the command, in milliseconds.
+|checkpointThreads|4| Number of CPU threads dedicated to checkpointing.
+|compactionThreads|4| Number of CPU threads dedicated to data compaction.
+|frequency|180000|Checkpoint frequency in milliseconds.
+|frequencyDeviation|40| Allowed deviation in checkpoint frequency, in percent.
+|logReadLockThresholdTimeout|0| Threshold for logging long read locks, in milliseconds.
+|readLockTimeout|10000| Timeout for checkpoint read lock acquisition, in milliseconds.
+|useAsyncFileIoFactory|true| Defines whether Ignite uses the asynchronous file I/O provider.
+|===
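+
+Checkpointing is configured per node. As a sketch, assuming the checkpoint properties live under the `aipersist.checkpoint` section of the node configuration (illustrative path), the checkpoint frequency could be changed like this:
+
+[source, bash]
+----
+node config update aipersist.checkpoint.frequency=120000
+----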
+
+== Configuration Example
+
+The example below shows how to configure one data region that uses Ignite persistence:
+
+----
+ignite config set --type cluster \
+"{
+    aipersist.regions: [{
+        name: btree_persistent_region,
+        maxSize: 256000000
+    }]
+}"
+----
\ No newline at end of file
diff --git a/docs/_docs/storage/rocksdb.adoc b/docs/_docs/administrators-guide/config/storage/rocksdb.adoc
similarity index 87%
rename from docs/_docs/storage/rocksdb.adoc
rename to docs/_docs/administrators-guide/config/storage/rocksdb.adoc
index 8481213..69630e6 100644
--- a/docs/_docs/storage/rocksdb.adoc
+++ b/docs/_docs/administrators-guide/config/storage/rocksdb.adoc
@@ -28,10 +28,13 @@
 [cols="1,1,3",opts="header", stripes=none]
 |===
 |Property|Default|Description
+
 |name|| The name of the data region.
 |size| `256 * 1024 * 1024` | Size of the offheap cache.
 |writeBufferSize | `64 * 1024 * 1024` | Size of the write buffer.
+|cache| `lru` | The type of cache to use. Currently, only the `lru` cache is supported; using the `clock` cache is not recommended.
 |numShardBits| `-1` | The number of parts the cache is sharded to.
+|flushDelayMillis |100| The delay before handling RocksDB flush events.
 |===
 
 
@@ -44,7 +47,8 @@
 "{
     rocksDb.regions: [{
         name: lsm_region,
-        size: 256000000
+        size: 256000000,
+        cache: lru
     }]
 }"
-----
+----
\ No newline at end of file
diff --git a/docs/_docs/administrators-guide/config/storage/table-configuration.adoc b/docs/_docs/administrators-guide/config/storage/table-configuration.adoc
new file mode 100644
index 0000000..04d0d7f
--- /dev/null
+++ b/docs/_docs/administrators-guide/config/storage/table-configuration.adoc
@@ -0,0 +1,62 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Table Configuration
+
+In Ignite 3, you can set up cluster-wide policies for how tables should be handled. Additionally, you can configure the properties of individual tables and indexes to improve performance under your load.
+
+== Generic Settings
+
+Generic configuration settings are applied to all tables and indexes in the cluster. Below is the list of properties:
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+|Property|Default|Description
+
+|gcThreads|16| Number of CPU threads dedicated to garbage collection.
+|globalIdCounter|0|Current global ID of the table. Incremented automatically when a new table is created.
+|lowWatermark.dataAvailabilityTime|2700000|The duration deleted data is kept in the database, in milliseconds.
+|lowWatermark.updateFrequency|300000| Low watermark update frequency (in milliseconds).
+|===
+
+
+== Table Configuration
+
+Table configuration parameters can be used to manually fine-tune tables to improve performance.
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+|Property|Default|Description
+
+|name||The name of the table.
+|tableId|1|The ID of the table.
+|zoneId|0|The ID of the distribution zone the table belongs to.
+|columns||The list of columns in the table.
+|primaryKey||The table's primary key.
+
+|===
+
+== Index Configuration
+
+Index configuration parameters can be used to manually fine-tune indexes to improve performance.
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+|Property|Default|Description
+
+|type||The type of the index.
+|id||Index ID.
+|name||Index name.
+|tableId||The ID of the table the index belongs to.
+|===
\ No newline at end of file
diff --git a/docs/_docs/storage/volatile.adoc b/docs/_docs/administrators-guide/config/storage/volatile.adoc
similarity index 74%
rename from docs/_docs/storage/volatile.adoc
rename to docs/_docs/administrators-guide/config/storage/volatile.adoc
index c13d7c1..bfeed11 100644
--- a/docs/_docs/storage/volatile.adoc
+++ b/docs/_docs/administrators-guide/config/storage/volatile.adoc
@@ -12,26 +12,29 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-
 = Volatile Storage
 
 == Overview
 
-Apache Ignite Volatile storage is designed to provide a quick and responsive storage without guarantees of data persistence.
+Ignite Volatile storage is designed to provide quick and responsive storage without guarantees of data persistence.
 
 
-When it is enabled for the link:config/data-region[data region], Ignite stores all data in the data region in RAM. Data will be lost on cluster shutdown, so make sure to have a separate data region for persistent storage.
+When volatile storage is enabled for a data region, Ignite stores all data of that region in RAM. The data is lost on cluster shutdown, so make sure to have a separate data region for persistent storage.
 
 == Configuration Parameters
 
 [cols="1,1,3",opts="header", stripes=none]
 |===
+|Property|Default|Description
+
 |name|| The name of the data region.
 |initSize|`256 * 1024 * 1024`| Sets the initial space allocated to the data region.
 |maxSize|`256 * 1024 * 1024`| Sets the maximum space that can be allocated to the data region.
 |evictionMode|`DISABLED`| Sets the eviction algorithm to use. Possible values: `DISABLED`, `RANDOM_LRU`, `RANDOM_2_LRU`.
 |evictionThreshold|`0.9`| Configures when the eviction process starts.
 |emptyPagesPoolSize|100| The maximum number of empty pages Ignite will try to keep.
+|pageSize|16384| The size of pages in the storage, in bytes.
+|memoryAllocator.type|unsafe|Memory allocator configuration. Uses `sun.misc.Unsafe` to improve performance. Currently, no other options are available.
 |===
 
 
diff --git a/docs/_docs/administrators-guide/handling-exceptions.adoc b/docs/_docs/administrators-guide/handling-exceptions.adoc
new file mode 100644
index 0000000..3a9a654
--- /dev/null
+++ b/docs/_docs/administrators-guide/handling-exceptions.adoc
@@ -0,0 +1,266 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+= Exceptions
+
+This section outlines basic exceptions that can be generated by Ignite 3 and provides basic instructions for handling them.
+
+== Finding Stack Trace Information
+
+When an exception occurs, Ignite 3 provides the UUID of the specific exception, but not the full stack trace. To find the full stack trace, check the cluster logs.
+
+== Common Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-CMN-1`|Operation was stopped because node is stopping.
+|`IGN-CMN-2`|Operation was stopped because the component is not started.
+|`IGN-CMN-3`|Operation failed because an illegal argument or argument in a wrong format has been passed.
+|`IGN-CMN-4`|Operation failed because SSL could not be configured.
+|`IGN-CMN-5`|Operation failed because a node has left the cluster.
+|===
+
+== Table Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-TBL-1`|Table already exists.
+|`IGN-TBL-2`|Table not found.
+|`IGN-TBL-3`|Column already exists.
+|`IGN-TBL-4`|Column not found.
+|`IGN-TBL-5`|Table is already stopping.
+|`IGN-TBL-6`|Table definition not correct.
+|`IGN-TBL-7`|Schema version mismatch.
+|===
+
+== Client Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-CLIENT-1`|Connection to client failed.
+|`IGN-CLIENT-2`|An issue occurred with connection protocol.
+|`IGN-CLIENT-3`|Incompatible protocol version.
+|`IGN-CLIENT-4`|Failed to find the table by ID.
+|`IGN-CLIENT-5`|An error occurred during authentication.
+|`IGN-CLIENT-6`|An error occurred during authorization.
+|`IGN-CLIENT-7`|Client configuration error.
+|`IGN-CLIENT-8`|Cluster ID mismatch error.
+|`IGN-CLIENT-9`|Client SSL configuration error.
+|`IGN-CLIENT-10`|Client handshake header error.
+|===
+
+== SQL Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-SQL-1`|Failed to read a page after last existing one.
+|`IGN-SQL-2`|Failed to read response from a query that does not return rows.
+|`IGN-SQL-3`|Schema not found error.
+|`IGN-SQL-4`|Cursor is already closed error.
+|`IGN-SQL-5`|Statement parsing error. SQL string is not grammatically valid.
+|`IGN-SQL-6`|Statement validation error. SQL string is not semantically valid or contains prohibited operations.
+|`IGN-SQL-7`|Constraint violation error, for example, primary key violation.
+|`IGN-SQL-8`|Statement was cancelled.
+|`IGN-SQL-9`|Runtime error. Usually caused by an SQL error, for example, a numeric overflow.
+|`IGN-SQL-10`|Planning timed out without finding any valid plan.
+|`IGN-SQL-11`|Operation was rejected because the SQL session was closed.
+|`IGN-SQL-12`|The SQL engine was unable to map the query to the current cluster topology. Most likely, a partition or view is not available because all nodes containing it are offline.
+|`IGN-SQL-13`|Execution of transaction control statement inside an external transaction is forbidden.
+|===
+
+== Meta Storage Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-META-1`|Failed to start the underlying key value storage.
+|`IGN-META-2`|Failed to restore the underlying key value storage.
+|`IGN-META-3`|Failed to compact the underlying key value storage.
+|`IGN-META-4`|Failed to perform an operation on the underlying key value storage.
+|`IGN-META-5`|Failed to perform an operation within a specified time period.
+|===
+
+== Index Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-IDX-1`|Invalid index definition.
+|`IGN-IDX-2`|Failed to find the specified index.
+|`IGN-IDX-3`|Specified index already exists.
+|===
+
+== Transactions Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-TX-1`|Default error for transaction state storage.
+|`IGN-TX-2`|Transaction state storage is stopped.
+|`IGN-TX-3`|Unexpected transaction state on state change.
+|`IGN-TX-4`|Failed to acquire a lock on a key due to a conflict.
+|`IGN-TX-5`|Failed to acquire a lock on a key within the timeout.
+|`IGN-TX-6`|Failed to commit a transaction.
+|`IGN-TX-7`|Failed to roll back a transaction.
+|`IGN-TX-8`|Failed to put read-write operation into read-only transaction.
+|`IGN-TX-9`|Replica is not ready to handle a request.
+|`IGN-TX-10`|Transaction state storage rebalancing error.
+|`IGN-TX-11`|Failed to create a read-only transaction with a timestamp older than the data available in the tables.
+|`IGN-TX-12`|Failure due to an incompatible schema change.
+|`IGN-TX-13`|Failure due to an abandoned transaction.
+|`IGN-TX-14`|Failure due to primary replica expiration.
+|`IGN-TX-15`|Coordinator tried to commit a transaction that has already been aborted.
+|===
+
+== Replicator Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-REP-1`|Default error for the replication procedure.
+|`IGN-REP-2`|A replica with the same identifier already exists.
+|`IGN-REP-3`|A timeout occurred during the replication procedure.
+|`IGN-REP-4`|The error happens when the replication layer tries to handle an unsupported request.
+|`IGN-REP-5`|The error happens when the replica is not ready to handle a request.
+|`IGN-REP-6`|The error happens when the replica is not the current primary replica.
+|`IGN-REP-7`|Failed to close cursor.
+|`IGN-REP-8`|Stopping replica exception code.
+|`IGN-REP-9`|Replication safe time reordering.
+
+|===
+
+
+== Storage Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-STORAGE-1`|Default error code for storage exceptions.
+|`IGN-STORAGE-2`|Failed to create a directory.
+|`IGN-STORAGE-3`|Operation on closed storage.
+|`IGN-STORAGE-4`|Storage rebalancing error.
+|===
+
+
+== Distribution Zone Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-DISTRZONES-1`|Distribution zone already exists.
+|`IGN-DISTRZONES-2`|Distribution zone is not found.
+|`IGN-DISTRZONES-3`|Distribution zone renaming error.
+|`IGN-DISTRZONES-4`|The distribution zone is the default distribution zone or is bound to a table.
+|`IGN-DISTRZONES-5`|Distribution zone definition error.
+
+|===
+
+
+== Network Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-NETWORK-1`|Unresolvable consistent ID.
+|`IGN-NETWORK-2`|Port is already in use.
+|===
+
+== Node Configuration Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-NODECFG-1`|Failed to read configuration.
+|`IGN-NODECFG-2`|Failed to create a configuration file.
+|`IGN-NODECFG-3`|Failed to write configuration.
+|`IGN-NODECFG-4`|Failed to parse configuration.
+|===
+
+
+== Code Deployment Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-CODEDEPLOY-1`|Access attempt to a non-existing deployment unit.
+|`IGN-CODEDEPLOY-2`|Duplicate deployment unit.
+|`IGN-CODEDEPLOY-3`|Deployment unit content read error.
+|`IGN-CODEDEPLOY-4`|Deployment unit is unavailable for computing.
+|===
+
+== Garbage Collector Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-GC-1`|Garbage collector closed.
+|===
+
+== Authentication Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-AUTHENTICATION-1`|Authentication error caused by unsupported authentication type.
+|`IGN-AUTHENTICATION-2`|Authentication error caused by invalid credentials.
+|===
+
+== Compute Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-COMPUTE-1`|Classpath error.
+|`IGN-COMPUTE-2`|Class loader error.
+|===
+
+== Catalog Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-CATALOG-1`|A command to the catalog has not passed validation. See the exception message for details.
+|===
+
+== Placement Driver Exceptions
+
+[cols="20%,80%", width="100%",opts="header"]
+|===
+|Exception	|Description
+
+|`IGN-PLACEMENTDRIVER-1`|Primary replica await timeout error.
+|`IGN-PLACEMENTDRIVER-2`|Primary replica await error.
+|===
\ No newline at end of file
diff --git a/docs/_docs/administrators-guide/metrics/configuring-metrics.adoc b/docs/_docs/administrators-guide/metrics/configuring-metrics.adoc
new file mode 100644
index 0000000..60cd570
--- /dev/null
+++ b/docs/_docs/administrators-guide/metrics/configuring-metrics.adoc
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Configuring Metrics
+
+Metrics collection might affect the performance of an application, so all metric sources are disabled by default.
+
+== Metrics Management
+
+All metric management is performed through the link:ignite-cli-tool[Ignite CLI tool].
+
+=== Checking Metric Sources and Enabled Metrics
+
+The Ignite CLI tool can list all available metric sources:
+
+[source, bash]
+----
+node metric source list
+----
+
+This command lists all metric sources. To check the list of metrics, enable the metric source you need and then use the following command:
+
+[source, bash]
+----
+node metric list
+----
+
+This command will return the list of all currently available metrics, organized with their exporters.
+
+=== Enabling Metric Sources
+
+Metric sources are enabled on a per-node basis. You can specify the node to interact with by using the `-u` parameter to specify the node URL, or the `-n` parameter to specify the node name. For example, here is how you can enable the `jvm` metric source:
+
+[source, bash]
+----
+node metric source enable -n=defaultNode jvm
+----
+
+=== Disabling Metric Sources
+
+Metric sources are also disabled on a per-node basis. You can specify the node to interact with by using the `-u` parameter to specify the node URL, or the `-n` parameter to specify the node name. For example, here is how you can disable the `jvm` metric source:
+
+[source, bash]
+----
+node metric source disable -n=defaultNode jvm
+----
+
+== Accessing Metrics
+
+// Might add about  -Dcom.sun.management.jmxremote.port=1099 as a way to hard link port later
+
+You can work with metrics by using any external monitoring tool. To access the metrics, enable the JMX exporter in the cluster configuration:
+
+----
+cluster config update metrics.exporters.jmx.exporterName=jmx
+----
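+
+To verify which exporters are currently configured, you can inspect the metrics section of the cluster configuration (assuming `cluster config show` accepts a configuration path as an argument):
+
+[source, bash]
+----
+cluster config show metrics
+----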
+
+After you do, monitoring tools will be able to collect enabled metrics from the node:
+
+image::images/jmc-metrics.png[]
\ No newline at end of file
diff --git a/docs/_docs/administrators-guide/metrics/metrics-list.adoc b/docs/_docs/administrators-guide/metrics/metrics-list.adoc
new file mode 100644
index 0000000..765ae0f
--- /dev/null
+++ b/docs/_docs/administrators-guide/metrics/metrics-list.adoc
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Available Metrics
+
+This topic lists all metrics available in Ignite 3.
+
+== client.handler
+
+The metrics provided by the client handler and related to active clients.
+
+[width="100%",cols="20%,80%",opts="header"]
+|=======================================================================
+|Metric name | Description
+
+|ConnectionsInitiated|Total initiated connections.
+|SessionsAccepted|Total accepted sessions.
+|SessionsActive|The number of currently active sessions.
+|SessionsRejected|Total sessions rejected due to handshake errors.
+|SessionsRejectedTls|Total sessions rejected due to TLS handshake errors.
+|SessionsRejectedTimeout|Total sessions rejected due to timeout.
+|BytesSent|Total bytes sent.
+|BytesReceived|Total bytes received.
+|RequestsActive|Requests in progress.
+|RequestsProcessed|Total processed requests.
+|RequestsFailed|Total failed requests.
+|TransactionsActive|Active transactions.
+|CursorsActive|Active cursors.
+|=======================================================================
+
+== jvm
+
+The metrics for Ignite Java Virtual Machine resource use.
+
+[width="100%",cols="20%,80%",opts="header"]
+|=======================================================================
+|Metric name | Description
+
+|memory.heap.Committed|Committed amount of heap memory.
+|memory.non-heap.Init|Initial amount of non-heap memory.
+|memory.heap.Max|Maximum amount of heap memory.
+|memory.heap.Used|Currently used amount of heap memory.
+|memory.non-heap.Max|Maximum amount of non-heap memory.
+|memory.non-heap.Committed|Committed amount of non-heap memory.
+|memory.non-heap.Used|Used amount of non-heap memory.
+|memory.heap.Init|Initial amount of heap memory.
+|=======================================================================
+
+== sql.client
+
+SQL client metrics.
+
+[width="100%",cols="20%,80%",opts="header"]
+|=======================================================================
+|Metric name | Description
+|OpenCursors | Number of currently open cursors.
+|=======================================================================
+
+== sql.plan.cache
+
+Metrics for the SQL query plan cache.
+
+[width="100%",cols="20%,80%",opts="header"]
+|=======================================================================
+|Metric name | Description
+|Hits | Plan cache hits.
+|Misses | Plan cache misses.
+|=======================================================================
\ No newline at end of file
diff --git a/docs/_docs/binary-protocol.adoc b/docs/_docs/binary-protocol.adoc
deleted file mode 100644
index 1242281..0000000
--- a/docs/_docs/binary-protocol.adoc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-= Binary Client Protocol
-
-Ignite binary client protocol enables user applications to communicate with an existing Ignite cluster without starting a full-fledged Ignite node.
-An application can connect to the cluster through a raw TCP socket. Once the connection is established, the application can communicate with the Ignite cluster and perform cache operations using the established format.
-
-This Beta release implementation of binary client protocol comes with the following improvements:
-
-* Implemented link:https://cwiki.apache.org/confluence/display/IGNITE/IEP-75+Thin+Client+MsgPack+Serialization[MsgPack,window=_blank] as an underlying binary format;
-* Added new set of data types;
-* Added more flexible and extensible handshake with the magic header string.
-
-For more information on binary client protocol, please click link:https://cwiki.apache.org/confluence/display/IGNITE/IEP-76+Thin+Client+Protocol+for+Ignite+3.0[here,window=_blank].
\ No newline at end of file
diff --git a/docs/_docs/compute/compute.adoc b/docs/_docs/compute/compute.adoc
deleted file mode 100644
index f9c872d..0000000
--- a/docs/_docs/compute/compute.adoc
+++ /dev/null
@@ -1,106 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-= Distributed Computing
-
-Apache Ignite 3 provides an API for distributing computations across cluster nodes in a balanced and fault-tolerant manner. You can submit individual tasks for execution from Java and .NET clients.
-
-You can use Java or .NET client to execute compute jobs. Make sure the required classes are deployed to the cluster before executing code.
-
-Here is how you can execute a simple compute job:
-
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-private void example() {
-    IgniteClient client = client();
-    IgniteCompute compute = client.compute();
-    Set<ClusterNode> nodes = new HashSet<>(client.clusterNodes());
-
-    compute.execute(nodes, NodeNameJob.class, "Hello");
-}
-
-private static class NodeNameJob implements ComputeJob<String> {
-    @Override
-    public String execute(JobExecutionContext context, Object... args) {
-        return context.ignite().name() + "_" + args[0];
-    }
-}
-----
-
-
-NOTE: Unlike Ignite 2, jobs are not serialized. Only the class name and arguments are sent to the node.
-
-tab:.NET[]
-[source, csharp]
-----
-IIgniteClient client = Client;
-ICompute compute = client.Compute;
-IList<IClusterNode> nodes = await Client.GetClusterNodesAsync();
-string res = await compute.ExecuteAsync<string>(nodes, jobClassName: "org.foo.bar.NodeNameJob", "Hello!");
-----
---
-
-
-== Colocated Computations
-
-In Apache Ignite 3 you can execute colocated computation with `executeColocated` method. When you do it, the compute task is guaranteed to be executed on the nodes that hold the specified key. This can significantly reduce execution time if your tasks require data.
-
-In this example we will need a table to colocate:
-
-
-
-[source, java]
-----
-executeSql("CREATE TABLE test (k int, v int, CONSTRAINT PK PRIMARY KEY (k))");
-executeSql("INSERT INTO test(k, v) VALUES (1, 101)");
-----
-
-And we will execute a simple task:
-
-----
-class GetNodeNameJob implements ComputeJob<String> {
-    @Override
-    public String execute(JobExecutionContext context, Object... args) {
-        return context.ignite().name();
-    }
-}
-----
-
-
-[tabs]
---
-tab:Tuple[]
-[source, java]
-----
-String actualNodeName = ignite.compute()
-        .executeColocated("PUBLIC.test", Tuple.create(Map.of("k", 1)), GetNodeNameJob.class)
-        .get(1, TimeUnit.SECONDS);
-
-System.out.println(actualNodeName);
-----
-
-tab:Mapper[]
-[source, java]
-----
-String actualNodeName = ignite.compute()
-        .executeColocated("PUBLIC.test", 1, Mapper.of(Integer.class), GetNodeNameJob.class)
-        .get(1, TimeUnit.SECONDS);
-
-System.out.println(actualNodeName);
-----
---
\ No newline at end of file
diff --git a/docs/_docs/config/data-region.adoc b/docs/_docs/config/data-region.adoc
deleted file mode 100644
index 9e28ab9..0000000
--- a/docs/_docs/config/data-region.adoc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-= Data Region Configuration
-
-You can set configuration for data regions by using HOCON format. You can configure your data regions to run either RocksDB or B+Tree storage, and fine-tune their performance. A cluster can have several data regions, and you can select the preferred storage type for each. Configuration parameters for data regions are available in the storage-related articles.
-
-We recommend to use B+Tree storage for read-heavy workloads, and RocksDB for write-heavy workloads.
-
-
-== Configuration Example
-
-The example below shows how to configure two data regions, one using RocksDB storage, and another using a volatile page memory:
-
-----
-ignite config set --type cluster \
-"{
-    rocksdb.regions: [{
-        name: lsm_region,
-        size: 256000000
-    }],
-
-    aimem.regions: [{
-        name: btree_volatile_region,
-        maxSize: 256000000
-    }]
-}"
-----
diff --git a/docs/_docs/developers-guide/cache.adoc b/docs/_docs/developers-guide/cache.adoc
new file mode 100644
index 0000000..5b0c24e
--- /dev/null
+++ b/docs/_docs/developers-guide/cache.adoc
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Cache Storage
+
+Ignite caches are designed as temporary storage that provides rapid access to data required for local operation.
+
+Cache data is always stored in a special *cache store*. By default, the cache maintains weak consistency with the remote storage.
+
+== Difference Between Caches and Tables
+
+Unlike tables, caches in Ignite are not persistent and do not store transaction history. This means that caches also do not support read-only transactions and continuous queries, as those features rely on transaction history to ensure consistency.
+
+Fields in caches cannot be nullable.
+
+== Creating a Cache
+
+Caches are created by using the DDL link:/sql-reference/create-cache[CREATE CACHE] command, for example:
+
+[source, SQL]
+----
+CREATE CACHE person (
+    id INT PRIMARY KEY,
+    name VARCHAR,
+    ttl TIMESTAMP
+) EXPIRE AT ttl;
+----
+
+When creating a cache, it must use the `aimem` storage type.
+
+== Working With Caches
+
+The cache API completely mirrors the link:developers-guide/table-api[table] API.
+
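+Below is a minimal, hypothetical sketch of working with a cache through the table API in Java. It assumes the `person` cache created above is obtained the same way as a table, and that a connected `client` instance already exists; the exact way a cache is obtained may differ.
+
+[source, java]
+----
+// Assumption: because the cache API mirrors the table API, the cache is
+// obtained through IgniteTables and accessed with a key-value view.
+KeyValueView<Tuple, Tuple> kv = client.tables()
+        .table("person")
+        .keyValueView();
+
+Tuple key = Tuple.create().set("id", 1);
+Tuple val = Tuple.create()
+        .set("name", "John Doe")
+        .set("ttl", LocalDateTime.now().plusHours(1));
+
+kv.put(null, key, val);
+Tuple stored = kv.get(null, key);
+----
+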
+== Cache Modes
+
+Ignite supports two cache modes:
+
+- In write-through mode, the cache maintains strong consistency with the storage.
+- In write-behind mode, the cache maintains eventual consistency with the storage. This may cause consistency issues, but improves performance.
+//- In reverse replication mode, the cache does not write data
\ No newline at end of file
diff --git a/docs/_docs/developers-guide/clients/cpp.adoc b/docs/_docs/developers-guide/clients/cpp.adoc
new file mode 100644
index 0000000..c929614
--- /dev/null
+++ b/docs/_docs/developers-guide/clients/cpp.adoc
@@ -0,0 +1,512 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= C++ Client
+
+Ignite 3 clients connect to the cluster via a standard socket connection. Unlike Ignite 2.x, there are no separate Thin and Thick clients in Ignite 3. All clients are 'thin'.
+
+Clients do not become a part of the cluster topology, never hold any data, and are not used as a destination for compute calculations.
+
+== Getting Started
+=== Prerequisites
+
+To run the C\++ client, you need a C++ build environment to run the `cmake` command:
+
+- C\++ compiler supporting C++ 17;
+- CMake 3.10+;
+- One of the following build systems: make, ninja, MS Visual Studio, or other;
+- Conan C/C++ package manager 1.X (optional).
+
+
+=== [[build-ref]]Installation
+
+The source code of the C++ client comes with the Ignite 3 distribution. To build it, use the following commands:
+
+
+[tabs]
+--
+tab:Windows[]
+[source,bat]
+----
+mkdir cmake-build-release
+cd cmake-build-release
+conan install .. --build=missing -s build_type=Release
+cmake ..
+cmake --build . -j8
+----
+
+tab:Linux[]
+[source,bash,subs="attributes,specialchars"]
+----
+mkdir cmake-build-release
+cd cmake-build-release
+conan install .. --build=missing -s build_type=Release -s compiler.libcxx=libstdc++11
+cmake .. -DCMAKE_BUILD_TYPE=Release
+cmake --build . -j8
+----
+
+tab:MacOS[]
+[source,bash,subs="attributes,specialchars"]
+----
+mkdir cmake-build-release
+cd cmake-build-release
+conan install .. --build=missing -s build_type=Release -s compiler.libcxx=libc++
+cmake .. -DCMAKE_BUILD_TYPE=Release
+cmake --build . -j8
+----
+
+--
+
+=== Building C++ Client on CentOS 7 and RHEL 7
+
+If you are running on older systems, you need to set up the environment in the following way:
+
+. Install `epel-release` and `centos-release-scl`:
++
+[source,bash]
+----
+yum install epel-release centos-release-scl
+----
++
+. Update yum and accept `epel-release` keys:
++
+[source,bash]
+----
+yum update
+----
++
+. Install the build tools from the main repository and `devtoolset-11`:
++
+[source,bash]
+----
+yum install devtoolset-11-gcc devtoolset-11-gcc-c++ cmake3 git java-11-openjdk-devel gtest-devel gmock-devel
+----
++
+. Install `conan` version 1.56.0 or more recent but older than 2.00:
++
+[source,bash]
+----
+pip3 install -v "conan>=1.56.0,<2.0.0" --force-reinstall
+----
++
+. Create and update alternatives for `cmake` to force the use of `cmake3`:
+.. Create an alternative for `cmake2` with priority 10:
++
+[source,bash]
+----
+sudo alternatives --install /usr/local/bin/cmake cmake /usr/bin/cmake 10 \
+--slave /usr/local/bin/ctest ctest /usr/bin/ctest \
+--slave /usr/local/bin/cpack cpack /usr/bin/cpack \
+--slave /usr/local/bin/ccmake ccmake /usr/bin/ccmake \
+--family cmake
+----
++
+.. Create an alternative for `cmake3` with priority 20:
++
+[source,bash]
+----
+sudo alternatives --install /usr/local/bin/cmake cmake /usr/bin/cmake3 20 \
+--slave /usr/local/bin/ctest ctest /usr/bin/ctest3 \
+--slave /usr/local/bin/cpack cpack /usr/bin/cpack3 \
+--slave /usr/local/bin/ccmake ccmake /usr/bin/ccmake3 \
+--family cmake
+----
++
+.. Check that the default alternative points to `cmake3`:
++
+[source,bash]
+----
+sudo alternatives --config cmake
+----
++
+. Enable the `devtoolset-11` compiler and start bash with the updated PATH:
++
+[source,bash]
+----
+scl enable devtoolset-11 bash
+----
++
+. Start the link:clients/overview#build-ref[build] in the shell you have established.
+
+== Client Connector Configuration
+
+Client connection parameters are controlled by the client connector configuration. By default, Ignite accepts client connections on port 10800. You can change the configuration for the node by using the link:ignite-cli-tool[CLI tool] at any time.
+
+Here is what the client connector configuration looks like:
+
+[source, json]
+----
+"clientConnector": {
+  "port": 10800,
+  "idleTimeout":3000,
+  "sendServerExceptionStackTraceToClient":true,
+  "ssl": {
+    enabled: true,
+    clientAuth: "require",
+    keyStore: {
+      path: "KEYSTORE_PATH",
+      password: "SSL_STORE_PASS"
+    },
+    trustStore: {
+      path: "TRUSTSTORE_PATH",
+      password: "SSL_STORE_PASS"
+    }
+  }
+}
+
+----
+
+//NOTE: Replace with link to javadoc once it is published.
+
+The table below describes the client connector configuration properties:
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+
+|connectTimeout|5000| Connection attempt timeout, in milliseconds.
+|idleTimeout|0|How long the client can be idle before the connection is dropped, in milliseconds. By default, there is no limit.
+|metricsEnabled|`false`|Defines if client metrics are collected.
+|port|10800|The port the client connector listens on.
+|sendServerExceptionStackTraceToClient|`false`|Defines if cluster exceptions are sent to the client.
+|ssl.ciphers||The cipher used for SSL communication.
+|ssl.clientAuth||Type of client authentication used by clients. For more information, see link:security/ssl-tls[SSL/TLS].
+|ssl.enabled||Defines if SSL is enabled.
+|ssl.keyStore.password||SSL keystore password.
+|ssl.keyStore.path||Path to the SSL keystore.
+|ssl.keyStore.type|`PKCS12`|The type of SSL keystore used.
+|ssl.trustStore.password||SSL truststore password.
+|ssl.trustStore.path||Path to the SSL truststore.
+|ssl.trustStore.type|`PKCS12`|The type of SSL truststore used.
+|======
+
+Here is how you can change the parameters:
+
+
+----
+node config update clientConnector.port=10469
+----
+
+
+== Connecting to Cluster
+
+To initialize a client, use the `IgniteClient` class, and provide it with the configuration:
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+using namespace ignite;
+
+ignite_client_configuration cfg{"127.0.0.1"};
+auto client = ignite_client::start(cfg, std::chrono::seconds(5));
+----
+--
+
+== Authentication
+
+To pass authentication information, provide it in the client configuration:
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+auto authenticator = std::make_shared<ignite::basic_authenticator>("myUser", "myPassword");
+
+ignite::ignite_client_configuration cfg{"127.0.0.1:10800"};
+cfg.set_authenticator(authenticator);
+auto client = ignite_client::start(std::move(cfg), std::chrono::seconds(30));
+----
+--
+
+== User Object Serialization
+
+Ignite supports mapping user objects to table tuples. This ensures that objects created in any programming language can be used for key-value operations directly.
+
+=== Limitations
+
+There are limitations to user types that can be used for such a mapping. Some limitations are common, and others are platform-specific due to the programming language used.
+
+- Only a flat field structure is supported, meaning user objects cannot be nested. This is because Ignite tables, and therefore tuples, have a flat structure themselves;
+- Fields should be mapped to Ignite types;
+- All fields in the user type should either be mapped to a table column or explicitly excluded;
+- All columns of the table should be mapped to some field in the user type;
+- *C++ only*: The user has to provide marshaling functions explicitly, as there is no reflection to generate them based on the user type structure.
+
+=== Usage Examples
+
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+struct account {
+  account() = default;
+  account(std::int64_t id) : id(id) {}
+  account(std::int64_t id, std::int64_t balance) : id(id), balance(balance) {}
+
+  std::int64_t id{0};
+  std::int64_t balance{0};
+};
+
+namespace ignite {
+
+  template<>
+  ignite_tuple convert_to_tuple(account &&value) {
+    ignite_tuple tuple;
+
+    tuple.set("id", value.id);
+    tuple.set("balance", value.balance);
+
+    return tuple;
+  }
+
+  template<>
+  account convert_from_tuple(ignite_tuple&& value) {
+    account res;
+
+    res.id = value.get<std::int64_t>("id");
+
+    // Sometimes only key columns are returned, i.e. "id",
+    // so we have to check whether there are any other columns.
+    if (value.column_count() > 1)
+      res.balance = value.get<std::int64_t>("balance");
+
+    return res;
+  }
+
+} // namespace ignite
+----
+--
+
+
+== SQL API
+
+Ignite 3 is focused on SQL, and the SQL API is the primary way to work with the data. You can read more about supported SQL statements in the link:sql-reference/ddl[SQL Reference] section. Here is how you can send SQL requests:
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+result_set result = client.get_sql().execute(nullptr, {"select name from tbl where id = ?"}, {std::int64_t{42}});
+std::vector<ignite_tuple> page = result.current_page();
+ignite_tuple& row = page.front();
+----
+
+--
+
+
+=== SQL Scripts
+
+The default API executes SQL statements one at a time. If you want to execute multiple SQL statements as a single script, pass them to the `execute_script()` method. The statements will be executed in order.
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+std::string script =
+	"CREATE TABLE IF NOT EXISTS Person (id int primary key, city_id int, name varchar, age int, company varchar);"
+	"INSERT INTO Person VALUES (1, 3, 'John', 43, 'Sample')";
+
+client.get_sql().execute_script(script);
+----
+--
+
+NOTE: Execution of each statement is considered complete when the first page is ready to be returned. As a result, when working with large data sets, a SELECT statement may be affected by later statements in the same script.
+
+== Transactions
+
+All table operations in Ignite 3 are transactional. You can provide an explicit transaction as a first argument of any Table and SQL API call. If you do not provide an explicit transaction, an implicit one will be created for every call.
+
+Here is how you can provide a transaction explicitly:
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+auto accounts = table.get_key_value_view<account, account>();
+
+account init_value(42, 16'000);
+accounts.put(nullptr, {42}, init_value);
+
+auto tx = client.get_transactions().begin();
+
+std::optional<account> res_account = accounts.get(&tx, {42});
+res_account->balance += 500;
+accounts.put(&tx, {42}, *res_account);
+
+assert(accounts.get(&tx, {42})->balance == 16'500);
+
+tx.rollback();
+
+assert(accounts.get(nullptr, {42})->balance == 16'000);
+----
+
+--
+
+== Table API
+
+To execute table operations on a specific table, you need to get a specific view of the table and use one of its methods. You can only create new tables by using the SQL API.
+
+When working with tables, you can use the built-in Tuple type, which is a set of key-value pairs underneath, or map the data to your own types for strongly-typed access. Here is how you can work with tables:
+
+=== Getting a Table Instance
+
+First, get an instance of the table by using the `IgniteTables.table(String)` method. You can also use the `IgniteTables.tables()` method to list all existing tables.
+
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+using namespace ignite;
+
+auto table_api = client.get_tables();
+std::vector<table> existing_tables = table_api.get_tables();
+table first_table = existing_tables.front();
+
+std::optional<table> my_table = table_api.get_table("MY_TABLE");
+----
+--
+
+=== Basic Table Operations
+
+Once you have a table instance, get a specific view that defines how you want to operate on table records.
+
+==== Binary Record View
+
+A binary record view. It can be used to operate on table tuples directly.
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+record_view<ignite_tuple> view = table.get_record_binary_view();
+
+ignite_tuple record{
+  {"id", 42},
+  {"name", "John Doe"}
+};
+
+view.upsert(nullptr, record);
+std::optional<ignite_tuple> res_record = view.get(nullptr, {"id", 42});
+
+assert(res_record.has_value());
+assert(res_record->column_count() == 2);
+assert(res_record->get<std::int64_t>("id") == 42);
+assert(res_record->get<std::string>("name") == "John Doe");
+----
+
+--
+
+==== Record View
+
+A record view mapped to a user type. It can be used to operate on the table using user objects that are mapped to table tuples.
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+record_view<person> view = table.get_record_view<person>();
+
+person record(42, "John Doe");
+
+view.upsert(nullptr, record);
+std::optional<person> res_record = view.get(nullptr, person{42});
+
+assert(res_record.has_value());
+assert(res_record->id == 42);
+assert(res_record->name == "John Doe");
+----
+
+--
+
+==== Key-Value Binary View
+
+A binary key-value view. It can be used to operate on the table using key and value tuples separately.
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+key_value_view<ignite_tuple, ignite_tuple> kv_view = table.get_key_value_binary_view();
+
+ignite_tuple key_tuple{{"id", 42}};
+ignite_tuple val_tuple{{"name", "John Doe"}};
+
+kv_view.put(nullptr, key_tuple, val_tuple);
+std::optional<ignite_tuple> res_tuple = kv_view.get(nullptr, key_tuple);
+
+assert(res_tuple.has_value());
+assert(res_tuple->column_count() == 2);
+assert(res_tuple->get<std::int64_t>("id") == 42);
+assert(res_tuple->get<std::string>("name") == "John Doe");
+----
+--
+
+
+==== Key-Value View
+
+A key-value view with user objects. It can be used to operate on the table using key and value user objects mapped to table tuples.
+
+[tabs]
+--
+tab:C++[]
+[source, cpp]
+----
+key_value_view<person, person> kv_view = table.get_key_value_view<person, person>();
+
+kv_view.put(nullptr, {42}, {"John Doe"});
+std::optional<person> res = kv_view.get(nullptr, {42});
+
+assert(res.has_value());
+assert(res->id == 42);
+assert(res->name == "John Doe");
+----
+--
+
+== Streaming Data
+
+To stream a large amount of data, use the data streamer. Data streaming provides a quicker and more efficient way to load, organize, and optimally distribute your data. The data streamer accepts a stream of data and distributes data entries across the cluster, where the processing takes place. Data streaming is available in all table views.
+
+image::images/data_streaming.png[]
+
+Data streaming provides an at-least-once delivery guarantee.
+
+=== Using Data Streamer API
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+public async Task TestBasicStreamingRecordBinaryView()
+{
+    var options = DataStreamerOptions.Default with { BatchSize = 10 };
+    var data = Enumerable.Range(0, Count).Select(x => new IgniteTuple { ["id"] = 1L, ["name"] = "foo" }).ToList();
+
+    await TupleView.StreamDataAsync(data.ToAsyncEnumerable(), options);
+}
+----
+--
\ No newline at end of file
diff --git a/docs/_docs/developers-guide/clients/dotnet.adoc b/docs/_docs/developers-guide/clients/dotnet.adoc
new file mode 100644
index 0000000..59945a8
--- /dev/null
+++ b/docs/_docs/developers-guide/clients/dotnet.adoc
@@ -0,0 +1,369 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= .NET Client
+
+Ignite 3 clients connect to the cluster via a standard socket connection. Unlike Ignite 2.x, there are no separate Thin and Thick clients in Ignite 3. All clients are 'thin'.
+
+Clients do not become a part of the cluster topology, never hold any data, and are not used as a destination for compute calculations.
+
+== Getting Started
+
+=== Prerequisites
+
+To use C# thin client, .NET 6.0 or newer is required.
+
+=== Installation
+
+The C# client is available via NuGet. To add it, use the `add package` command:
+
+----
+dotnet add package Apache.Ignite --version 3.0.0-beta2
+----
+
+== Connecting to Cluster
+
+To initialize a client, use the `IgniteClient` class, and provide it with the configuration:
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+var clientCfg = new IgniteClientConfiguration
+{
+  Endpoints = { "127.0.0.1" }
+};
+using var client = await IgniteClient.StartAsync(clientCfg);
+----
+--
+
+== Authentication
+
+To pass authentication information, provide it in the `IgniteClientConfiguration`:
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+var cfg = new IgniteClientConfiguration("127.0.0.1:10800")
+{
+	Authenticator = new BasicAuthenticator
+	{
+		Username = "myUser",
+		Password = "myPassword"
+	}
+};
+IIgniteClient client = await IgniteClient.StartAsync(cfg);
+----
+--
+
+== User Object Serialization
+
+Ignite supports mapping user objects to table tuples. This ensures that objects created in any programming language can be used for key-value operations directly.
+
+=== Limitations
+
+There are limitations to user types that can be used for such a mapping. Some limitations are common, and others are platform-specific due to the programming language used.
+
+- Only a flat field structure is supported, meaning user objects cannot be nested. This is because Ignite tables, and therefore tuples, have a flat structure themselves;
+- Fields should be mapped to Ignite types;
+- All fields in the user type should either be mapped to a table column or explicitly excluded;
+- All columns of the table should be mapped to some field in the user type;
+- *.NET only*: Any type (class, struct, record) is supported as long as all fields can be mapped to Ignite types.
+
+=== Usage Examples
+
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+public class Account
+{
+  public long Id { get; set; }
+  public long Balance { get; set; }
+
+  [NotMapped]
+  public Guid UnmappedId { get; set; }
+}
+----
+--
+
+
+== SQL API
+
+Ignite 3 is focused on SQL, and the SQL API is the primary way to work with the data. You can read more about supported SQL statements in the link:sql-reference/ddl[SQL Reference] section. Here is how you can send SQL requests:
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+IResultSet<IIgniteTuple> resultSet = await client.Sql.ExecuteAsync(transaction: null, "select name from tbl where id = ?", 42);
+List<IIgniteTuple> rows = await resultSet.ToListAsync();
+IIgniteTuple row = rows.Single();
+Debug.Assert(row["name"] as string == "John Doe");
+----
+--
+
+
+=== SQL Scripts
+
+The default API executes SQL statements one at a time. If you want to execute multiple SQL statements as a single script, pass them to the `ExecuteScriptAsync()` method. The statements will be executed in order.
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+string script =
+    "CREATE TABLE IF NOT EXISTS Person (id int primary key, city_id int, name varchar, age int, company varchar);" +
+    "INSERT INTO Person VALUES (1, 3, 'John', 43, 'Sample')";
+
+await Client.Sql.ExecuteScriptAsync(script);
+----
+--
+
+NOTE: Execution of each statement is considered complete when the first page is ready to be returned. As a result, when working with large data sets, a SELECT statement may be affected by later statements in the same script.
+
+== Transactions
+
+All table operations in Ignite 3 are transactional. You can provide an explicit transaction as a first argument of any Table and SQL API call. If you do not provide an explicit transaction, an implicit one will be created for every call.
+
+Here is how you can provide a transaction explicitly:
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+var accounts = table.GetKeyValueView<long, Account>();
+await accounts.PutAsync(transaction: null, 42, new Account(16_000));
+
+await using ITransaction tx = await client.Transactions.BeginAsync();
+
+(Account account, bool hasValue) = await accounts.GetAsync(tx, 42);
+account = account with { Balance = account.Balance + 500 };
+
+await accounts.PutAsync(tx, 42, account);
+
+Debug.Assert((await accounts.GetAsync(tx, 42)).Value.Balance == 16_500);
+
+await tx.RollbackAsync();
+
+Debug.Assert((await accounts.GetAsync(null, 42)).Value.Balance == 16_000);
+
+public record Account(decimal Balance);
+----
+--
+
+== Table API
+
+To execute table operations on a specific table, you need to get a specific view of the table and use one of its methods. You can only create new tables by using the SQL API.
+
+When working with tables, you can use the built-in Tuple type, which is a set of key-value pairs underneath, or map the data to your own types for strongly-typed access. Here is how you can work with tables:
+
+=== Getting a Table Instance
+
+First, get an instance of the table by using the `IgniteTables.table(String)` method. You can also use the `IgniteTables.tables()` method to list all existing tables.
+
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+var existingTables = await Client.Tables.GetTablesAsync();
+var firstTable = existingTables[0];
+
+var myTable = await Client.Tables.GetTableAsync("MY_TABLE");
+----
+--
+
+=== Basic Table Operations
+
+Once you have a table instance, get a specific view that defines how you want to operate on table records.
+
+==== Binary Record View
+
+A binary record view. It can be used to operate on table tuples directly.
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+IRecordView<IIgniteTuple> view = table.RecordBinaryView;
+
+IIgniteTuple fullRecord = new IgniteTuple
+{
+  ["id"] = 42,
+  ["name"] = "John Doe"
+};
+
+await view.UpsertAsync(transaction: null, fullRecord);
+
+IIgniteTuple keyRecord = new IgniteTuple { ["id"] = 42 };
+(IIgniteTuple value, bool hasValue) = await view.GetAsync(transaction: null, keyRecord);
+
+Debug.Assert(hasValue);
+Debug.Assert(value.FieldCount == 2);
+Debug.Assert(value["id"] as int? == 42);
+Debug.Assert(value["name"] as string == "John Doe");
+----
+--
+
+==== Record View
+
+A record view mapped to a user type. It can be used to operate on the table using user objects that are mapped to table tuples.
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+var pocoView = table.GetRecordView<Poco>();
+
+await pocoView.UpsertAsync(transaction: null, new Poco(42, "John Doe"));
+var (value, hasValue) = await pocoView.GetAsync(transaction: null, new Poco(42));
+
+Debug.Assert(hasValue);
+Debug.Assert(value.Name == "John Doe");
+
+public record Poco(long Id, string? Name = null);
+----
+--
+
+==== Key-Value Binary View
+
+A binary key-value view. It can be used to operate on the table using key and value tuples separately.
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+IKeyValueView<IIgniteTuple, IIgniteTuple> kvView = table.KeyValueBinaryView;
+
+IIgniteTuple key = new IgniteTuple { ["id"] = 42 };
+IIgniteTuple val = new IgniteTuple { ["name"] = "John Doe" };
+
+await kvView.PutAsync(transaction: null, key, val);
+(IIgniteTuple? value, bool hasValue) = await kvView.GetAsync(transaction: null, key);
+
+Debug.Assert(hasValue);
+Debug.Assert(value.FieldCount == 1);
+Debug.Assert(value["name"] as string == "John Doe");
+----
+--
+
+
+==== Key-Value View
+
+A key-value view with user objects. It can be used to operate on the table using key and value user objects mapped to table tuples.
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+IKeyValueView<long, Poco> kvView = table.GetKeyValueView<long, Poco>();
+
+await kvView.PutAsync(transaction: null, 42, new Poco(Id: 0, Name: "John Doe"));
+(Poco? value, bool hasValue) = await kvView.GetAsync(transaction: null, 42);
+
+Debug.Assert(hasValue);
+Debug.Assert(value.Name == "John Doe");
+
+public record Poco(long Id, string? Name = null);
+----
+--
+
+== Streaming Data
+
+To stream a large amount of data, use the data streamer. Data streaming provides a quicker and more efficient way to load, organize, and optimally distribute your data. The data streamer accepts a stream of data and distributes data entries across the cluster, where the processing takes place. Data streaming is available in all table views.
+
+image::images/data_streaming.png[]
+
+Data streaming provides an at-least-once delivery guarantee.
+
+=== Using Data Streamer API
+
+[tabs]
+--
+tab:.NET[]
+[source, csharp]
+----
+public async Task TestBasicStreamingRecordBinaryView()
+{
+    var options = DataStreamerOptions.Default with { BatchSize = 10 };
+    var data = Enumerable.Range(0, Count).Select(x => new IgniteTuple { ["id"] = 1L, ["name"] = "foo" }).ToList();
+
+    await TupleView.StreamDataAsync(data.ToAsyncEnumerable(), options);
+}
+----
+--
+
+
+== Client Metrics
+
+Metrics are exposed by the .NET client through the `System.Diagnostics.Metrics` API with the `Apache.Ignite` meter name. For example, here is how you can access Ignite metrics by using the link:https://learn.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-counters[dotnet-counters] tool:
+
+[source, bash]
+----
+dotnet-counters monitor --counters Apache.Ignite,System.Runtime --process-id PID
+----
+
+You can also get metrics in your code by creating a listener:
+
+[source, csharp]
+----
+var listener = new MeterListener();
+listener.InstrumentPublished = (instrument, meterListener) =>
+{
+    if (instrument.Meter.Name == "Apache.Ignite")
+    {
+        meterListener.EnableMeasurementEvents(instrument);
+    }
+};
+listener.SetMeasurementEventCallback<int>(
+    (instrument, measurement, tags, state) => Console.WriteLine($"{instrument.Name}: {measurement}"));
+
+listener.Start();
+----
+
+
+=== Available .NET Metrics
+
+[width="100%",cols="20%,80%",opts="header"]
+|=======================================================================
+|Metric name | Description
+
+|connections-active|The number of currently active connections.
+|connections-established|The number of established connections.
+|connections-lost|The number of connections lost.
+|connections-lost-timeout|The number of connections lost due to a timeout.
+|handshakes-failed|The number of failed handshakes.
+|handshakes-failed-timeout|The number of handshakes that failed due to a timeout.
+|requests-active|The number of currently active requests.
+|requests-sent|The number of requests sent.
+|requests-completed|The number of completed requests. Requests are completed once a response is received.
+|requests-retried|The number of request retries.
+|requests-failed|The number of failed requests.
+|bytes-sent|The number of bytes sent.
+|bytes-received|The number of bytes received.
+|streamer-batches-sent|The number of data streamer batches sent.
+|streamer-items-sent|The number of data streamer items sent.
+|streamer-batches-active|The number of existing data streamer batches.
+|streamer-items-queued|The number of queued data streamer items.
+
+|=======================================================================
\ No newline at end of file
diff --git a/docs/_docs/developers-guide/clients/java.adoc b/docs/_docs/developers-guide/clients/java.adoc
new file mode 100644
index 0000000..13fbcef
--- /dev/null
+++ b/docs/_docs/developers-guide/clients/java.adoc
@@ -0,0 +1,144 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Java Client
+
+Ignite 3 clients connect to the cluster via a standard socket connection. Unlike Ignite 2.x, there are no separate Thin and Thick clients in Ignite 3. All clients are 'thin'.
+
+Clients do not become a part of the cluster topology, never hold any data, and are not used as a destination for compute calculations.
+
+== Getting Started
+=== Prerequisites
+
+To use Java thin client, Java 11 or newer is required.
+
+=== Installation
+
+The Java client can be added to your project by using Maven:
+
+[source, xml]
+----
+<dependency>
+    <groupId>org.apache.ignite</groupId>
+    <artifactId>ignite-client</artifactId>
+    <version>3.0.0-beta2</version>
+</dependency>
+----
+
+== Connecting to Cluster
+
+To initialize a client, use the `IgniteClient` class, and provide it with the configuration:
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+try (IgniteClient client = IgniteClient.builder()
+  .addresses("127.0.0.1:10800")
+  .build()
+) {
+  // Your code goes here
+}
+----
+--
+
+== Authentication
+
+To pass authentication information, use the `IgniteClientAuthenticator` class and pass it to the `IgniteClient` builder:
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+IgniteClientAuthenticator auth = BasicAuthenticator.builder().username("myUser").password("myPassword").build();
+IgniteClient client = IgniteClient.builder()
+            .addresses("127.0.0.1:10800")
+            .authenticator(auth)
+            .build();
+----
+--
+
+=== Limitations
+
+There are limitations to user types that can be mapped to table tuples. Some limitations are common, and others are platform-specific due to the programming language used.
+
+- Only a flat field structure is supported, meaning user objects cannot be nested. This is because Ignite tables, and therefore tuples, have a flat structure themselves;
+- Fields should be mapped to Ignite types;
+- All fields in the user type should either be mapped to a table column or explicitly excluded;
+- All columns of the table should be mapped to some field in the user type;
+- *Java only*: Users should implement `Mapper` classes for user types for more flexibility (see the sketch after this list).
+
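+=== Usage Example
+
+The snippet below is a minimal sketch and is not taken from the product documentation: it assumes a hypothetical `accounts` table whose columns match the flat `Account` class, and a `client` instance that is already connected.
+
+[source, java]
+----
+// Flat user type: every field corresponds to a column of the assumed "accounts" table.
+public class Account {
+    public long id;
+    public long balance;
+}
+
+// Strongly-typed record view created from a Mapper.
+RecordView<Account> view = client.tables()
+        .table("accounts")
+        .recordView(Mapper.of(Account.class));
+
+Account acc = new Account();
+acc.id = 42;
+acc.balance = 16_000;
+
+view.upsert(null, acc);
+Account stored = view.get(null, acc);
+----
+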
+=== SQL Scripts
+
+The default API executes SQL statements one at a time. If you want to execute multiple SQL statements as a single script, pass them to the `executeScript()` method. The statements will be executed in order.
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+String script = ""
+                + "CREATE TABLE IF NOT EXISTS Person (id int primary key, city_id int, name varchar, age int, company varchar);"
+                + "INSERT INTO Person VALUES (1, 3, 'John', 43, 'Sample')";
+
+ignite.sql().createSession().executeScript(script);
+----
+--
+
+NOTE: Execution of each statement is considered complete when the first page is ready to be returned. As a result, when working with large data sets, a SELECT statement may be affected by later statements in the same script.
+
+
+== Client Metrics
+
+=== Java
+
+When running the Java client, you need to enable metrics in the client builder:
+
+[source, java]
+----
+IgniteClient client = IgniteClient.builder()
+  .addresses("127.0.0.1:10800")
+  .metricsEnabled(true)
+  .build();
+
+----
+
+After that, client metrics will be available to any Java monitoring tool, for example link:https://www.oracle.com/java/technologies/jdk-mission-control.html[JDK Mission Control].
+
+==== Available Java Metrics
+
+[width="100%",cols="20%,80%",opts="header"]
+|=======================================================================
+|Metric name | Description
+
+|ConnectionsActive|The number of currently active connections.
+|ConnectionsEstablished|The number of established connections.
+|ConnectionsLost|The number of connections lost.
+|ConnectionsLostTimeout|The number of connections lost due to a timeout.
+|HandshakesFailed|The number of failed handshakes.
+|HandshakesFailedTimeout|The number of handshakes that failed due to a timeout.
+|RequestsActive|The number of currently active requests.
+|RequestsSent|The number of requests sent.
+|RequestsCompleted|The number of completed requests. Requests are completed once a response is received.
+|RequestsRetried|The number of request retries.
+|RequestsFailed|The number of failed requests.
+|BytesSent|The number of bytes sent.
+|BytesReceived|The number of bytes received.
+|StreamerBatchesSent|The number of data streamer batches sent.
+|StreamerItemsSent|The number of data streamer items sent.
+|StreamerBatchesActive|The number of existing data streamer batches.
+|StreamerItemsQueued|The number of queued data streamer items.
+
+|=======================================================================
\ No newline at end of file
diff --git a/docs/_docs/thin-clients/linq.adoc b/docs/_docs/developers-guide/clients/linq.adoc
similarity index 92%
rename from docs/_docs/thin-clients/linq.adoc
rename to docs/_docs/developers-guide/clients/linq.adoc
index be47332..fc6e4b5 100644
--- a/docs/_docs/thin-clients/linq.adoc
+++ b/docs/_docs/developers-guide/clients/linq.adoc
@@ -1,6 +1,20 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 = .NET LINQ Queries
 
-Apache Ignite .NET client provides LINQ support that is integrated with Ignite SQL APIs. You can avoid working with SQL syntax directly and write queries in C# with LINQ. C# LINQ expressions are then translated into Ignite-specific SQL. For example, the following two snippets achieve the same result:
+Ignite .NET client provides LINQ support that is integrated with Ignite SQL APIs. You can avoid working with SQL syntax directly and write queries in C# with LINQ. C# LINQ expressions are then translated into Ignite-specific SQL. For example, the following two snippets achieve the same result:
 
 [tabs]
 --
@@ -37,12 +51,12 @@
 * LINQ is safe against SQL injections;
 * Results are mapped to types naturally.
 
-In real-world scenarios the performance of Apache Ignite LINQ queries is on par with equivalent SQL queries.
+In real-world scenarios the performance of Ignite LINQ queries is on par with equivalent SQL queries.
 However, a small overhead still exists (due to query translation), and your mileage may vary depending on the query complexity, so it is recommended to measure the performance of your queries.
 
 == Getting Started With LINQ
 
-Here is how you can create a simple table in Apache Ignite:
+Here is how you can create a simple table in Ignite:
 
 1. Create a table:
 +
@@ -339,7 +353,7 @@
 
 === Aggregate Functions
 
-Below is a list of .NET aggregate functions and their SQL equivalents that are supported in Apache Ignite:
+Below is a list of .NET aggregate functions and their SQL equivalents that are supported in Ignite:
 
 [cols="30%,30%,30%", width="70%"]
 |===
diff --git a/docs/_docs/developers-guide/clients/overview.adoc b/docs/_docs/developers-guide/clients/overview.adoc
new file mode 100644
index 0000000..e3e46fb
--- /dev/null
+++ b/docs/_docs/developers-guide/clients/overview.adoc
@@ -0,0 +1,114 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Ignite Clients
+
+Ignite 3 clients connect to the cluster via a standard socket connection. Unlike Ignite 2.x, there are no separate Thin and Thick clients in Ignite 3. All clients are 'thin'.
+
+Clients do not become a part of the cluster topology, never hold any data, and are not used as a destination for compute calculations.
+
+== Client Connector Configuration
+
+Client connection parameters are controlled by the client connector configuration. By default, Ignite accepts client connections on port 10800. You can change the configuration for the node by using the link:ignite-cli-tool[CLI tool] at any time.
+
+Here is what the client connector configuration looks like:
+
+[source, json]
+----
+"clientConnector": {
+  "port": 10800,
+  "idleTimeout":3000,
+  "sendServerExceptionStackTraceToClient":true,
+  "ssl": {
+    enabled: true,
+    clientAuth: "require",
+    keyStore: {
+      path: "KEYSTORE_PATH",
+      password: "SSL_STORE_PASS"
+    },
+    trustStore: {
+      path: "TRUSTSTORE_PATH",
+      password: "SSL_STORE_PASS"
+    }
+  }
+}
+
+----
+
+//NOTE: Replace with link to javadoc once it is published.
+
+The table below describes the client connector configuration properties:
+
+[cols="1,1,3",opts="header", stripes=none]
+|======
+|Property|Default|Description
+
+|connectTimeout|5000| Connection attempt timeout, in milliseconds.
+|idleTimeout|0|How long the client can be idle before the connection is dropped, in milliseconds. By default, there is no limit.
+|metricsEnabled|`false`|Defines if client metrics are collected.
+|port|10800|The port the client connector listens on.
+|sendServerExceptionStackTraceToClient|`false` a| 
+By default, only the exception message and code are sent back to the client. 
+
+Set this property to `true` to include the full stack trace, which will appear as part of the client-side exception. 
+
+NOTE: Not recommended for production: stack trace disclosure is a link:https://owasp.org/www-community/Improper_Error_Handling[security weakness].  
+|ssl.ciphers||The cipher used for SSL communication.
+|ssl.clientAuth||Type of client authentication used by clients. For more information, see link:security/ssl-tls[SSL/TLS].
+|ssl.enabled||Defines if SSL is enabled.
+|ssl.keyStore.password||SSL keystore password.
+|ssl.keyStore.path||Path to the SSL keystore.
+|ssl.keyStore.type|`PKCS12`|The type of SSL keystore used.
+|ssl.trustStore.password||SSL truststore password.
+|ssl.trustStore.path||Path to the SSL truststore.
+|ssl.trustStore.type|`PKCS12`|The type of SSL truststore used.
+|======
+
+Here is how you can change the parameters:
+
+
+----
+node config update clientConnector.port=10469
+----
+
+=== Limitations
+
+There are limitations to user types that can be mapped to table tuples. Some limitations are common, and others are platform-specific due to the programming language used.
+
+- Only a flat field structure is supported, meaning user objects cannot be nested. This is because Ignite tables, and therefore tuples, have a flat structure themselves;
+- Fields should be mapped to Ignite types;
+- All fields in the user type should either be mapped to a table column or explicitly excluded;
+- All columns of the table should be mapped to some field in the user type;
+- *Java only*: Users should implement `Mapper` classes for user types for more flexibility.
+
+== Partition Awareness
+
+In Ignite 3, partition awareness is enabled automatically for all clients.
+
+Data in the cluster is distributed between the nodes in a balanced manner for scalability and performance reasons. Each cluster node maintains a subset of the data, and the partition distribution map, which is used to determine the node that keeps the primary/backup copy of requested entries.
+
+Partition awareness allows the client to send query requests directly to the node that owns the queried data.
+
+Without partition awareness, an application that is connected to the cluster via a client would execute all queries and operations via a single server node that acts as a proxy for the incoming requests.
+These operations would then be re-routed to the node that stores the data that is being requested.
+This would result in a bottleneck that could prevent the application from scaling linearly.
+
+image::images/partitionawareness01.png[Without Partition Awareness]
+
+Notice how queries must pass through the proxy server node, where they are routed to the correct node.
+
+With partition awareness in place, the client can directly route queries and operations to the primary nodes that own the data required for the queries.
+This eliminates the bottleneck, allowing the application to scale more easily.
+
+image::images/partitionawareness02.png[With Partition Awareness]
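+
+For example, a Java client that is given the addresses of several nodes keeps connections to all of them and can route each operation directly. This is a minimal sketch; the host names below are placeholders.
+
+[source, java]
+----
+// Connections are established to every listed node, so requests can be sent
+// straight to the node that owns the data for each key.
+IgniteClient client = IgniteClient.builder()
+        .addresses("node1.example.com:10800", "node2.example.com:10800", "node3.example.com:10800")
+        .build();
+----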
diff --git a/docs/_docs/developers-guide/code-deployment/code-deployment.adoc b/docs/_docs/developers-guide/code-deployment/code-deployment.adoc
new file mode 100644
index 0000000..e41720d
--- /dev/null
+++ b/docs/_docs/developers-guide/code-deployment/code-deployment.adoc
@@ -0,0 +1,102 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Code Deployment
+
+When working with Ignite, you may need to deploy user code to nodes. For example, you may need the code used in link:compute/compute[Distributed Computing].
+
+In Ignite 3, when you deploy user code to the cluster, it is saved as a *deployment unit*. Deployment units have a unique ID and are immutable. When you need to update the code, you need to deploy a new unit version.
+
+All interactions with deployment units are performed by using the REST API. REST endpoints are available for deploying units, undeploying units, and checking deployment unit statuses either on a node or across the whole cluster.
+
+== Deploying New Unit
+
+To create a new deployment unit, send a request to the `/management/v1/deployment/units/{unitId}/{unitVersion}` endpoint. You can deploy code to one node, and it will be propagated to the cluster as necessary, but generally it is better to provide the list of all nodes when deploying user code.
+
+Below is the list of parameters for the endpoint:
+
+[width="100%",cols="1,1,3",opts="header"]
+|===
+
+|Parameter|Type|Description
+
+|unitId|path|*Required* Unique unit ID. If a deployment unit with this ID does not exist, it is created. Otherwise, a new version is created instead.
+|unitVersion|path|*Required* Unique version of the deployment unit. If a deployment unit with the specified ID and version already exists, an HTTP 409 "Conflict" response is returned.
+|deployMode|query|Defines how many nodes the unit will be deployed to. If set to `MAJORITY`, the unit will be deployed to enough nodes to form cluster management group majority. If set to `ALL`, the unit will be deployed to all nodes. Cannot be used with the `initialNodes` parameter.
+|initialNodes|query|The list of names of specific nodes to deploy the unit to. Cannot be used with the `deployMode` parameter.
+
+|===
+
+== Getting Unit Information
+
+Deployment unit information is available via multiple endpoints:
+
+=== Getting Specific Unit Information
+
+The `/management/v1/deployment/node/units/{unitId}` and `/management/v1/deployment/cluster/units/{unitId}` endpoints provide information about the deployment unit specified in the request, for the node or across the cluster respectively.
+
+You can further narrow down the search by looking only for specific versions or statuses.
+
+[width="100%",cols="1,1,3a",opts="header"]
+|===
+
+|Parameter|Type|Description
+
+|unitId|path|*Required* Unique unit ID of the deployment unit.
+|version|query|Unique version of the deployment unit. If not specified, all versions of deployment unit will be returned.
+|statuses|query|Statuses of the deployment units to return. Possible values:
+
+- `UPLOADING` - the unit is being deployed to the cluster
+- `DEPLOYED` - the unit is deployed to the cluster and can be used
+- `OBSOLETE` - the command to remove unit has been received, but it is still used in some jobs
+- `REMOVING` - the unit is being removed
+
+If not specified, deployment units in all statuses will be returned.
+|===
+
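+For illustration only (this sketch is not part of the REST reference), the cluster-wide status of a hypothetical unit `my-unit` can be requested with the JDK 11+ `HttpClient`. The REST port (`10300` here) is an assumption and depends on your node configuration.
+
+[source, java]
+----
+HttpClient http = HttpClient.newHttpClient();
+
+// GET cluster-wide information about the hypothetical unit "my-unit".
+HttpRequest request = HttpRequest.newBuilder()
+        .uri(URI.create("http://localhost:10300/management/v1/deployment/cluster/units/my-unit"))
+        .GET()
+        .build();
+
+HttpResponse<String> response = http.send(request, HttpResponse.BodyHandlers.ofString());
+System.out.println(response.body()); // JSON describing the unit's versions and statuses.
+----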
+
+=== Getting Information for All Units
+
+The `/management/v1/deployment/node/units` and `/management/v1/deployment/cluster/units` endpoints provide information about all deployment units, for the node or across the cluster respectively. Optionally, you can search only for deployment units in a specific status.
+
+
+[width="100%",cols="1,1,3a",opts="header"]
+|===
+
+|Parameter|Type|Description
+
+|statuses|query|Statuses of the deployment units to return. Possible values:
+
+- `UPLOADING` - the unit is being deployed to the cluster
+- `DEPLOYED` - the unit is deployed to the cluster and can be used
+- `OBSOLETE` - the command to remove unit has been received, but it is still used in some jobs
+- `REMOVING` - the unit is being removed
+
+If not specified, deployment units in all statuses will be returned.
+|===
+
+== Undeploying Unit
+
+When you no longer need a deployment unit version, send a request to the `/management/v1/deployment/units/{unitId}/{unitVersion}` endpoint. When the cluster receives the request, it will delete the specified deployment unit version on all nodes. If the unit is used in a job, it will instead be moved to the `OBSOLETE` status and removed once it is no longer required.
+
+[width="100%",cols="1,1,3",opts="header"]
+|===
+
+|Parameter|Type|Description
+
+|unitId|path|*Required* Unique unit ID of the deployment unit to delete.
+|unitVersion|path|*Required* Unique version of the deployment unit to delete.
+
+|===
+
diff --git a/docs/_docs/developers-guide/compute/compute.adoc b/docs/_docs/developers-guide/compute/compute.adoc
new file mode 100644
index 0000000..c1c7ec6
--- /dev/null
+++ b/docs/_docs/developers-guide/compute/compute.adoc
@@ -0,0 +1,369 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Distributed Computing
+
+Ignite 3 provides an API for distributing computations across cluster nodes in a balanced and fault-tolerant manner. You can submit individual compute jobs for execution from the Java, .NET, and C++ clients.
+
+Make sure the required classes are deployed to the cluster before executing code.
+
+The examples below assume that the `NodeNameJob` class has been deployed to the node by using link:developers-guide/code-deployment[code deployment].
+
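+For reference, here is a hypothetical sketch of what such a job class could look like. The `ComputeJob` interface and the `JobExecutionContext` methods shown are assumptions; check the Java API for the current signatures:
+
+[source, java]
+----
+// Hypothetical example job: returns the argument combined with the name of
+// the node it runs on. The interface and method names are assumptions.
+public class NodeNameJob implements ComputeJob<String> {
+    @Override
+    public String execute(JobExecutionContext context, Object... args) {
+        return args[0] + "_" + context.ignite().name();
+    }
+}
+----
+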
+//== Synchronous Computation
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+private void example() {
+    IgniteClient client = client();
+    IgniteCompute compute = client.compute();
+    Set<ClusterNode> nodes = new HashSet<>(client.clusterNodes());
+
+    //Unit `unitName:1.1.1` contains NodeNameJob class.
+    List<DeploymentUnit> units = List.of(new DeploymentUnit("unitName", Version.parseVersion("1.1.1")));
+
+    JobExecution<String> execution = compute.executeAsync(nodes, units, NodeNameJob.class, "Hello");
+    var result = execution.resultAsync();
+}
+----
+
+
+NOTE: Unlike in Ignite 2, jobs are not serialized. Only the class name and arguments are sent to the node.
+
+tab:.NET[]
+[source, csharp]
+----
+IIgniteClient client = Client;
+ICompute compute = client.Compute;
+IList<IClusterNode> nodes = await Client.GetClusterNodesAsync();
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+var units = new List<DeploymentUnit> { new DeploymentUnit("unitName", "1.1.1") };
+
+IJobExecution<string> execution = await compute.SubmitAsync<string>(nodes, units, NodeNameJob, JobExecutionOptions.Default, "Hello");
+string result = await execution.GetResultAsync();
+----
+
+tab:C++[]
+[source, cpp]
+----
+using namespace ignite;
+
+compute comp = client.get_compute();
+std::vector<cluster_node> nodes = client.get_nodes();
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+std::vector<deployment_unit> units{deployment_unit{"unitName", "1.1.1"}};
+
+job_execution execution = comp.submit(nodes, units, NODE_NAME_JOB, {std::string("Hello")}, {});
+std::string result = execution.get_result()->get<std::string>();
+----
+--
+
+//== Asynchronous Computation
+// Placeholder for when API is complete
+
+== Job Ownership
+
+If the cluster has link:security/authentication[Authentication] enabled, compute jobs are executed by a specific user. If user permissions are configured on the cluster, the user needs the appropriate link:security/permissions#distributed-computing[distributed computing permissions] to work with distributed computing jobs. Only users with the `JOBS_ADMIN` action can interact with the jobs of other users.
+
+== Job Execution States
+
+You can keep track of the status of the job on the server and react to status changes. For example:
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+private void example() {
+    IgniteClient client = client();
+    IgniteCompute compute = client.compute();
+    Set<ClusterNode> nodes = new HashSet<>(client.clusterNodes());
+
+    // Unit `unitName:1.1.1` contains NodeNameJob class.
+    List<DeploymentUnit> units = List.of(new DeploymentUnit("unitName", Version.parseVersion("1.1.1")));
+
+    JobExecution<String> execution = compute.executeAsync(nodes, units, NodeNameJob.class, "Hello");
+
+    execution.statusAsync().thenAccept(status -> {
+        if (status.state() == JobState.FAILED) {
+            // Handle failure
+        }
+    });
+
+    var result = execution.resultAsync();
+}
+----
+
+tab:.NET[]
+[source, csharp]
+----
+IIgniteClient client = Client;
+ICompute compute = client.Compute;
+IList<IClusterNode> nodes = await Client.GetClusterNodesAsync();
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+var units = new List<DeploymentUnit> { new DeploymentUnit("unitName", "1.1.1") };
+
+IJobExecution<string> execution = await compute.SubmitAsync<string>(nodes, units, NodeNameJob, JobExecutionOptions.Default, "Hello");
+
+JobStatus? status = await execution.GetStatusAsync();
+
+if (status?.State == JobState.Failed)
+{
+    // Handle failure
+}
+
+string result = await execution.GetResultAsync();
+----
+
+tab:C++[]
+[source, cpp]
+----
+using namespace ignite;
+
+compute comp = client.get_compute();
+std::vector<cluster_node> nodes = client.get_nodes();
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+std::vector<deployment_unit> units{deployment_unit{"unitName", "1.1.1"}};
+
+job_execution execution = comp.submit(nodes, units, NODE_NAME_JOB, {std::string("Hello")}, {});
+
+std::optional<job_status> status = execution.get_status();
+if (status && status->state == job_state::FAILED)
+{
+    // Handle failure
+}
+std::string result = execution.get_result()->get<std::string>();
+----
+--
+
+The table below lists the possible job statuses:
+
+[width="100%",cols="20%,60%,20%"]
+|=======================================================================
+|Status |Description |Transitions to
+
+| `Submitted` | The job was created and sent to the cluster, but not yet processed. | `Queued`, `Canceled`
+| `Queued` | The job was added to the queue and is waiting for execution. | `Executing`, `Canceled`
+| `Executing` | The job is being executed. | `Canceling`, `Completed`, `Queued`
+| `Completed` | The job was executed successfully and the execution result was returned. |
+| `Failed` | The job was unexpectedly terminated during execution. | `Queued`
+| `Canceling` | The job has received the cancel command, but is still running. | `Completed`, `Canceled`
+| `Canceled` | The job was successfully canceled. |
+
+|=======================================================================
+
+If all job execution threads are busy, new jobs received by the node are put into the job queue according to their <<Job Priority>>. Ignite sorts all incoming jobs first by priority, then by submission time, executing jobs queued earlier first.
+
+=== Cancelling Executing Jobs
+
+When the node receives the command to cancel a job in the `Executing` status, it will immediately send an interrupt to the thread that is responsible for the job. In most cases, this leads to the job being immediately canceled; however, there are cases in which the job will continue running. If this happens, the job will be in the `Canceling` state. Depending on the specific code being executed, the job may complete successfully, be canceled once the uninterruptible operation is finished, or remain in an unfinished state (for example, if the code is stuck in a loop). You can use the `JobExecution.statusAsync()` method to keep track of the job status and react to status changes.
+
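+Here is a minimal sketch of requesting cancellation and then checking the resulting status, continuing from the earlier example where `execution` was obtained. The `cancelAsync()` method name is an assumption; check the `JobExecution` interface in your API version:
+
+[source, java]
+----
+// Request cancellation of a running job (method name is an assumption),
+// then inspect the status: it may be Canceled, Canceling, or already Completed.
+execution.cancelAsync()
+        .thenCompose(cancelled -> execution.statusAsync())
+        .thenAccept(status -> System.out.println("Status after cancel request: " + status));
+----
+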
+
+== Job Configuration
+
+=== Job Priority
+
+You can specify a job priority by setting the `JobExecutionOptions.priority` property. Jobs with a higher priority will be queued before jobs with a lower priority (for example, a job with priority 4 will be executed before a job with priority 2).
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+private void example() {
+    IgniteClient client = client();
+    IgniteCompute compute = client.compute();
+    Set<ClusterNode> nodes = new HashSet<>(client.clusterNodes());
+
+    //Unit `unitName:1.1.1` contains NodeNameJob class.
+    List<DeploymentUnit> units = List.of(new DeploymentUnit("unitName", Version.parseVersion("1.1.1")));
+
+    // Create job execution options
+    JobExecutionOptions options = JobExecutionOptions.builder().priority(1).build();
+
+    JobExecution<String> execution = compute.executeAsync(nodes, units, NodeNameJob.class, options, "Hello");
+    var result = execution.resultAsync();
+}
+----
+
+tab:.NET[]
+[source, csharp]
+----
+IIgniteClient client = Client;
+ICompute compute = client.Compute;
+IList<IClusterNode> nodes = await Client.GetClusterNodesAsync();
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+var units = new List<DeploymentUnit> { new DeploymentUnit("unitName", "1.1.1") };
+
+// Create job execution options
+var options = JobExecutionOptions.Default with { Priority = 1 };
+
+IJobExecution<string> execution = await compute.SubmitAsync<string>(nodes, units, NodeNameJob, options, "Hello");
+string result = await execution.GetResultAsync();
+----
+
+tab:C++[]
+[source, cpp]
+----
+using namespace ignite;
+
+compute comp = client.get_compute();
+std::vector<cluster_node> nodes = client.get_nodes();
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+std::vector<deployment_unit> units{deployment_unit{"unitName", "1.1.1"}};
+
+// Set the job priority to 1; the second argument is the retry count.
+job_execution_options options{1, 0};
+job_execution execution = comp.submit(nodes, units, NODE_NAME_JOB, {std::string("Hello")}, std::move(options));
+std::string result = execution.get_result()->get<std::string>();
+----
+--
+
+=== Job Retries
+
+You can set the number of times a job will be retried on failure by setting the `JobExecutionOptions.maxRetries` property. If set, the failed job will be retried the specified number of times before moving to the `Failed` state.
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+private void example() {
+    IgniteClient client = client();
+    IgniteCompute compute = client.compute();
+    Set<ClusterNode> nodes = new HashSet<>(client.clusterNodes());
+
+    //Unit `unitName:1.1.1` contains NodeNameJob class.
+    List<DeploymentUnit> units = List.of(new DeploymentUnit("unitName", Version.parseVersion("1.1.1")));
+
+    // Create job execution options
+    JobExecutionOptions options = JobExecutionOptions.builder().maxRetries(5).build();
+
+    JobExecution<String> execution = compute.executeAsync(nodes, units, NodeNameJob.class, options, "Hello");
+    var result = execution.resultAsync();
+}
+----
+
+tab:.NET[]
+[source, csharp]
+----
+IIgniteClient client = Client;
+ICompute compute = client.Compute;
+IList<IClusterNode> nodes = await Client.GetClusterNodesAsync();
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+var units = new List<DeploymentUnit> { new DeploymentUnit("unitName", "1.1.1") };
+
+// Create job execution options
+var options = JobExecutionOptions.Default with { MaxRetries = 5 };
+
+IJobExecution<string> execution = await compute.SubmitAsync<string>(nodes, units, NodeNameJob, options, "Hello");
+string result = await execution.GetResultAsync();
+----
+
+tab:C++[]
+[source, cpp]
+----
+using namespace ignite;
+
+compute comp = client.get_compute();
+std::vector<cluster_node> nodes = client.get_nodes();
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+std::vector<deployment_unit> units{deployment_unit{"unitName", "1.1.1"}};
+
+// Retry the job up to 5 times on failure; the first argument is the priority.
+job_execution_options options{0, 5};
+job_execution execution = comp.submit(nodes, units, NODE_NAME_JOB, {std::string("Hello")}, std::move(options));
+std::string result = execution.get_result()->get<std::string>();
+----
+--
+
+== Job Failover
+
+Ignite 3 implements mechanisms to handle issues that happen during job execution. The following situations are handled:
+
+=== Worker Node Shutdown
+
+If the [.tooltip]#worker node# is shut down, the [.tooltip]#coordinator node# will redistribute all jobs assigned to the worker to other viable nodes. If no viable nodes are found, the job will fail and an exception will be sent to the client.
+
+=== Coordinator Node Shutdown
+
+If the coordinator node shuts down, all jobs will be canceled as soon as the nodes executing them detect that the coordinator is shut down. Note that link:compute/compute#cancelling-executing-jobs[some jobs] may take a long time to cancel.
+
+=== Client Disconnect
+
+If the client disconnects, all jobs will be canceled as soon as the coordinator node detects the disconnect. Note that link:compute/compute#cancelling-executing-jobs[some jobs] may take a long time to cancel.
+
+== Colocated Computations
+
+In Ignite 3, you can execute colocated computations with the `executeColocated` method. In this case, the compute job is guaranteed to be executed on the nodes that hold the specified key. This can significantly reduce execution time if your jobs require data from the table.
+
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+private void example() {
+    IgniteClient client = client();
+    IgniteCompute compute = client.compute();
+    String table = "Person";
+    String key = "John";
+
+
+    //Unit `unitName:1.1.1` contains NodeNameJob class.
+    List<DeploymentUnit> units = List.of(new DeploymentUnit("unitName", Version.parseVersion("1.1.1")));
+
+    JobExecution<String> execution = compute.executeColocatedAsync(table, key, units, NodeNameJob.class, "Hello");
+    String result = execution.resultAsync().join();
+}
+----
+
+tab:.NET[]
+[source, csharp]
+----
+IIgniteClient client = Client;
+ICompute compute = client.Compute;
+string table = "Person";
+string key = "John";
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+var units = new List<DeploymentUnit> { new DeploymentUnit("unitName", "1.1.1") };
+
+IJobExecution<string> execution = await compute.SubmitColocatedAsync<string, string>(table, key, units, NodeNameJob, "Hello");
+string result = await execution.GetResultAsync();
+----
+tab:C++[]
+[source, cpp]
+----
+using namespace ignite;
+
+compute comp = client.get_compute();
+std::string table{"Person"};
+std::string key{"John"};
+
+// Unit `unitName:1.1.1` contains NodeNameJob class.
+std::vector<deployment_unit> units{deployment_unit{"unitName", "1.1.1"}};
+
+job_execution execution = comp.submit_colocated(table, key, units, NODE_NAME_JOB, {std::string("Hello")}, {});
+std::string result = execution.get_result()->get<std::string>();
+----
+--
\ No newline at end of file
diff --git a/docs/_docs/developers-guide/data-streamer.adoc b/docs/_docs/developers-guide/data-streamer.adoc
new file mode 100644
index 0000000..4e9977b
--- /dev/null
+++ b/docs/_docs/developers-guide/data-streamer.adoc
@@ -0,0 +1,105 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Streaming Data
+
+To stream large amounts of data, use the data streamer. Data streaming provides a quicker and more efficient way to load, organize, and optimally distribute your data. The data streamer accepts a stream of data and distributes the entries across the cluster, where the processing takes place. Data streaming is available in all table views.
+
+image::images/data_streaming.png[]
+
+Data streaming provides an at-least-once delivery guarantee.
+
+== Using Data Streamer API
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+// Assume `defaultTable()` returns an existing table instance.
+RecordView<Tuple> view = defaultTable().recordView();
+
+CompletableFuture<Void> streamerFut;
+
+try (var publisher = new SubmissionPublisher<Tuple>()) {
+    var options = DataStreamerOptions.builder().batchSize(1000).build();
+    streamerFut = view.streamData(publisher, options);
+
+    var tuple1 = Tuple.create().set("id", 1L).set("name", "foo");
+    var tuple2 = Tuple.create().set("id", 2L).set("name", "bar");
+    publisher.submit(tuple1);
+    publisher.submit(tuple2);
+}
+
+streamerFut.orTimeout(1, TimeUnit.SECONDS).join();
+----
+
+tab:.NET[]
+[source, csharp]
+----
+public async Task TestBasicStreamingRecordBinaryView()
+{
+    var options = DataStreamerOptions.Default with { BatchSize = 10 };
+    var data = Enumerable.Range(0, 10).Select(x => new IgniteTuple { ["id"] = (long)x, ["name"] = "foo" }).ToList();
+
+    await TupleView.StreamDataAsync(data.ToAsyncEnumerable(), options);
+}
+----
+--
+
+== Configuring Data Streamer Properties
+
+All data streamer parameters can be configured by using the `DataStreamerOptions` object. For example, the code snippet below sets the data streamer retry limit to 3:
+
+[tabs]
+--
+tab:Java[]
+[source,java]
+----
+RecordView<Tuple> view = defaultTable().recordView();
+var publisher = new SubmissionPublisher<Tuple>();
+
+var options = DataStreamerOptions.builder()
+        .retryLimit(3)
+        .build();
+
+CompletableFuture<Void> streamerFut = view.streamData(publisher, options);
+----
+--
+
+=== Tuning Memory Usage
+
+The data streamer may require a significant amount of memory to handle requests in an orderly manner. Depending on your environment, you may want to increase or reduce the amount of memory reserved by the data streamer.
+
+For every node in the cluster, the streamer reserves an amount of memory equal to the `batchSize` (1000 entries by default) multiplied by the `perNodeParallelOperations` (4 by default) setting. For example, a 10-node cluster with default parameters and an average entry size of 1 KB will reserve 40 MB for streamer operations.
+
+You can change these options the same way you would work with any other options:
+
+[tabs]
+--
+tab:Java[]
+[source,java]
+----
+RecordView<Tuple> view = defaultTable().recordView();
+var publisher = new SubmissionPublisher<Tuple>();
+
+var options = DataStreamerOptions.builder()
+        .batchSize(10000)
+        .perNodeParallelOperations(10)
+        .build();
+
+CompletableFuture<Void> streamerFut = view.streamData(publisher, options);
+----
+--
+
+Additionally, the data streamer periodically flushes incomplete buffers to avoid messages being stuck for a long time (a specific buffer can fill up slowly or never fill completely at all, depending on the data distribution). This is configured with the `autoFlushFrequency` property (5000 ms by default).
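+
+For example, here is a sketch of lowering the flush interval to one second, assuming the builder exposes the property under the same `autoFlushFrequency` name:
+
+[tabs]
+--
+tab:Java[]
+[source,java]
+----
+RecordView<Tuple> view = defaultTable().recordView();
+var publisher = new SubmissionPublisher<Tuple>();
+
+var options = DataStreamerOptions.builder()
+        .autoFlushFrequency(1000) // Flush incomplete buffers every 1000 ms.
+        .build();
+
+CompletableFuture<Void> streamerFut = view.streamData(publisher, options);
+----
+--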
diff --git a/docs/_docs/developers-guide/java-to-tables.adoc b/docs/_docs/developers-guide/java-to-tables.adoc
new file mode 100644
index 0000000..5671c40
--- /dev/null
+++ b/docs/_docs/developers-guide/java-to-tables.adoc
@@ -0,0 +1,186 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Creating Tables from Java Classes
+
+== Overview
+
+While link:SQL-reference/ddl[SQL DDL] supports a comprehensive set of table manipulation commands, you can also create tables and build indexes directly from POJOs using a simple Java API. This API supports custom annotations and simple builders; it works seamlessly with the Mapper interface, thus facilitating link:developers-guide/table-api[keyValueView and recordView].
+
+The Java API lets you perform the following operations:
+
+* CREATE ZONE
+* CREATE TABLE
+* CREATE INDEX
+* DROP ZONE
+* DROP TABLE
+* DROP INDEX
+
+You use the `@Table` annotation and other annotations located in the `org.apache.ignite.catalog.annotations` package.
+
+== Examples
+
+=== KV POJO Compatible with keyValueView
+
+[source, java]
+----
+// annotate
+@Zone(
+        value = "zone_test",
+        partitions = 2,
+        engine = ZoneEngine.ROCKSDB
+)
+class ZoneTest {}
+
+class PojoKey {
+    @Id
+    Integer id;
+    
+    @Id(sort = DESC)
+    @Column(value = "id_str", length = 20)
+    String idStr;
+}
+
+@Table(
+    value = "kv_pojo_test",
+    zone = ZoneTest.class,
+    colocateBy = { @ColumnRef("f_name"), @ColumnRef("l_name") },
+    indexes = { @Index(value = "ix_test", columns = {
+                    @ColumnRef(value = "f_name"), 
+                    @ColumnRef(value = "l_name", sort = DESC_NULLS_LAST) })	
+    }
+)
+class PojoValue {
+    @Column("f_name")
+    String firstName;
+
+    @Column("l_name")
+    String lastName;
+
+    String str;
+}
+
+// execute
+ignite.catalog().create(PojoKey.class, PojoValue.class).execute();
+
+// access
+ignite.tables().table("kv_pojo_test").keyValueView(PojoKey.class, PojoValue.class)
+----
+
+The result is equivalent to the following SQL multi-statement:
+
+[source, sql]
+----
+CREATE ZONE IF NOT EXISTS zone_test ENGINE ROCKSDB WITH PARTITIONS=2;
+
+CREATE TABLE IF NOT EXISTS kv_pojo_test (
+	id int,
+	id_str varchar(20),
+	f_name varchar,
+	l_name varchar,
+	str varchar,
+	PRIMARY KEY (id, id_str desc)
+)
+COLOCATE BY (f_name, l_name)
+WITH PRIMARY_ZONE='ZONE_TEST';
+
+CREATE INDEX ix_test ON kv_pojo_test (f_name, l_name desc nulls last);
+----
+
+You can get the SQL string like this:
+
+[source, java]
+----
+ignite.catalog().create(...).toSqlString()
+----
+
+=== Single POJO Compatible with recordView
+
+[source, java]
+----
+// annotate
+@Table(
+    value = "pojo_test",
+    zone = ZoneTest.class,
+    colocateBy = { @ColumnRef("f_name"), @ColumnRef("l_name") },
+    indexes = { @Index(value = "ix_test", columns = {
+                      @ColumnRef(value = "f_name"), 
+                      @ColumnRef(value = "l_name", sort = DESC_NULLS_LAST) })
+    }
+)
+class Pojo {
+    @Id
+    Integer id;
+
+    @Id(sort = DESC)
+    @Column(value = "id_str", length = 20)
+    String idStr;
+
+    @Column("f_name")
+    String firstName;
+
+    @Column("l_name")
+    String lastName;
+
+    String str;
+}
+
+// execute
+ignite.catalog().create(Pojo.class).execute();
+
+// access
+ignite.tables().table("pojo_test").recordView(Pojo.class)
+----
+
+=== The Builder Alternative to the @Table Annotation
+
+NOTE: With builders, only `@Id` and `@Column` annotations on fields are considered.
+
+[source, java]
+----
+class Pojo {
+    @Id
+    Integer id;
+
+    @Id(sort = DESC)
+    @Column(value = "id_str", length = 20)
+    String idStr;
+
+    @Column("f_name")
+    String firstName;
+
+    @Column("l_name")
+    String lastName;
+
+    String str;
+}
+
+ignite.catalog()
+  .create(ZoneDefinition.builder("zone_test")
+    .partitions(2))
+  .execute();
+
+ignite.catalog()
+  .create(TableDefinition.builder("pojo_test")
+    .ifNotExists()
+    .colocateBy("id", "id_str")
+    .zone("zone_test")
+    .record(Pojo.class) // .key(Key.class).value(Value.class)
+    .build())
+  .execute();
+----
+
+== Next Steps
+
+Once you have created a table using the Java API, you can manipulate it using the link:SQL-reference/ddl[SQL commands].
\ No newline at end of file
diff --git a/docs/_docs/rest/rest-api.adoc b/docs/_docs/developers-guide/rest/rest-api.adoc
similarity index 85%
rename from docs/_docs/rest/rest-api.adoc
rename to docs/_docs/developers-guide/rest/rest-api.adoc
index 5901089..96cb0b8 100644
--- a/docs/_docs/rest/rest-api.adoc
+++ b/docs/_docs/developers-guide/rest/rest-api.adoc
@@ -12,14 +12,25 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+= REST API
 
-= Ignite REST API
-
-The Apache Ignite 3 clusters provide an link:https://www.openapis.org/[OpenAPI] specification that can be used to work with Ignite 3 by standard REST methods. The link:https://github.com/apache/ignite-3/tree/main/modules/rest-api/openapi/openapi.yaml[openapi.yaml,window=_blank] specification file for the version is included with its release.
+Ignite 3 clusters provide an link:https://www.openapis.org/[OpenAPI] specification that can be used to work with Ignite 3 via standard REST methods.
 
 We recommend that you generate client code in your project language by using an link:https://github.com/OpenAPITools/openapi-generator[OpenAPI code generator,window=_blank]. Below is the example of how you can do this for a Java project.
 
-== Example Java Project Configuration
+== Getting Started
+
+You do not need to explicitly configure REST on the cluster. The connector starts up automatically and listens on port 10300. You can check if it works with curl:
+
+[source, bash]
+----
+curl 'http://localhost:10300/management/v1/cluster/state'
+----
+
+
+== Java Project Configuration
+
+
 
 1. Add an link:https://github.com/OpenAPITools/openapi-generator/tree/master/modules/openapi-generator-maven-plugin[open api generator,window=_blank] maven plugin to your project's pom.xml.
 +
diff --git a/docs/_docs/sql/calcite-based-sql-engine.adoc b/docs/_docs/developers-guide/sql/calcite-based-sql-engine.adoc
similarity index 89%
rename from docs/_docs/sql/calcite-based-sql-engine.adoc
rename to docs/_docs/developers-guide/sql/calcite-based-sql-engine.adoc
index 19c94eb..160b382 100644
--- a/docs/_docs/sql/calcite-based-sql-engine.adoc
+++ b/docs/_docs/developers-guide/sql/calcite-based-sql-engine.adoc
@@ -14,7 +14,7 @@
 // limitations under the License.
 = Introduction
 
-Apache Ignite 3.0 Beta introduces new SQL engine based on the Apache Calcite framework to parse and optimize queries and generate execution plans. Previously, it was based on H2 Database.
+Ignite 3 introduces new SQL engine based on the Apache Calcite framework to parse and optimize queries and generate execution plans. Previously, it was based on H2 Database.
 
 Apache Calcite is a dynamic data management framework, which mainly serves for mediating between applications and one or more data storage locations and data processing engines.
 For more information on Apache Calcite, please see the link:https://calcite.apache.org/docs/[product documentation,window=_blank].
diff --git a/docs/_docs/sql/jdbc-driver.adoc b/docs/_docs/developers-guide/sql/jdbc-driver.adoc
similarity index 66%
rename from docs/_docs/sql/jdbc-driver.adoc
rename to docs/_docs/developers-guide/sql/jdbc-driver.adoc
index 3bc96af..e7e906e 100644
--- a/docs/_docs/sql/jdbc-driver.adoc
+++ b/docs/_docs/developers-guide/sql/jdbc-driver.adoc
@@ -14,29 +14,24 @@
 // limitations under the License.
 = JDBC Driver
 
-Apache Ignite is shipped with JDBC driver that allows processing of distributed data using standard SQL statements like `SELECT`, `INSERT`, `UPDATE`, or `DELETE` directly from the JDBC side.
+Ignite is shipped with a JDBC driver that allows processing of distributed data using standard SQL statements such as `SELECT`, `INSERT`, `UPDATE`, or `DELETE` directly from the JDBC side.
 
-WARNING:
-----
-This Beta release implementation of JDBC driver does not support the following functionality:
+This implementation of the JDBC driver does not support the following functionality:
 
 * SSL/TLS connection;
 * Multiple Endpoints;
-* Partition Awareness;
+* Multi-statement requests;
 * `CREATE TABLE`, `ALTER TABLE`, `WITH`, and `MERGE` commands.
-----
 
 == Setting Up
 
-The name of the driver’s class is `org.apache.ignite.internal.jdbc.IgniteJdbcDriver`. For instance, this is how you can open a JDBC connection to the cluster node listening on IP address `192.168.0.50`:
+The JDBC driver uses the client connector to work with the cluster. For more information on configuring the client connector, see link:clients/overview#client-connector-configuration[Client Connector Configuration].
+
+Here is how you can open a JDBC connection to the cluster node listening on IP address `192.168.0.50`:
 
 [source, java]
 ----
-// Load JDBC drivers.
-ServiceLoader.load(java.sql.Driver.class);
-
-// Open the JDBC connection.
-Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1:10800");
+Connection conn = DriverManager.getConnection("jdbc:ignite:thin://192.168.0.50:10800");
 ----
 
 The driver connects to one of the cluster nodes and forwards all the queries to it for final execution. The node handles the query distribution and the result’s aggregations. Then the result is sent back to the client application.
@@ -60,11 +55,38 @@
 * `port_from` is the beginning of the port range to use to open the connection. 10800 is used by default if this parameter is omitted.
 * `port_to` is optional. It is set to the `port_from` value by default if this parameter is omitted.
 * `schema` is the schema name to access. PUBLIC is used by default. This name should correspond to the SQL ANSI-99 standard. Non-quoted identifiers are not case sensitive. Quoted identifiers are case sensitive. When semicolon format is used, the schema may be defined as a parameter with name schema.
-* `<params>` are optional.
+* `<params>` are optional parameters. The following parameters are available:
+- `username` - user name for basic authentication to the cluster.
+- `password` - user password for basic authentication to the cluster.
+
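+For example, here is a sketch of a connection URL that passes basic authentication credentials as URL parameters; the exact parameter syntax may differ in your version, and the credentials below are placeholders:
+
+[source, java]
+----
+// Connect with basic authentication credentials passed as URL parameters.
+Connection conn = DriverManager.getConnection(
+        "jdbc:ignite:thin://192.168.0.50:10800?username=myUser&password=myPassword");
+----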
+
+== Performing Transactions
+
+With the JDBC driver, you can perform `commit` and `rollback` operations on transactions. For more information about transactions, see link:transactions/performing-transactions[Performing Transactions].
+
+Here is how you can commit a transaction:
+
+[source, java]
+----
+// Open the JDBC connection.
+Connection conn = DriverManager.getConnection("jdbc:ignite:thin://192.168.0.50:10800");
+
+// Commit a transaction
+conn.commit();
+----
+
+You can also control whether Ignite automatically commits each statement by using the standard JDBC `setAutoCommit()` method.
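+
+For example, here is a minimal sketch using the standard JDBC auto-commit switch:
+
+[source, java]
+----
+// Disable automatic commits to group several statements into one transaction.
+conn.setAutoCommit(false);
+
+// ... execute statements, then finish the transaction explicitly:
+conn.commit();
+
+// Re-enable automatic commits if manual control is no longer needed.
+conn.setAutoCommit(true);
+----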
+
+Here is how you can rollback a transaction:
+
+[source, java]
+----
+conn.rollback();
+----
 
 == Running an Example
 
-Examples are shipped as a separate Maven project, which is located in the `examples` folder. `SqlJdbcExample` demonstrates the usage of the Apache Ignite JDBC driver.
+Examples are shipped as a separate Maven project, which is located in the `examples` folder. `SqlJdbcExample` demonstrates the usage of the Ignite JDBC driver.
 
 To run `SqlJdbcExample`, perform the following steps:
 
diff --git a/docs/_docs/sql/odbc/connection-string.adoc b/docs/_docs/developers-guide/sql/odbc/connection-string.adoc
similarity index 65%
rename from docs/_docs/sql/odbc/connection-string.adoc
rename to docs/_docs/developers-guide/sql/odbc/connection-string.adoc
index 770e219..c8de66a 100644
--- a/docs/_docs/sql/odbc/connection-string.adoc
+++ b/docs/_docs/developers-guide/sql/odbc/connection-string.adoc
@@ -1,3 +1,17 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 = Connection String
 
 
@@ -40,6 +54,14 @@
 This argument value is ignored if `ADDRESS` argument is specified.
 |`10800`
 
+|`IDENTITY`
+|Identity to use for authentication. Depending on the authenticator used on the server side, it can be a user name or another unique identifier. See the link:security/authentication[Authentication] topic for details.
+|None.
+
+|`SECRET`
+|Secret to use for authentication. Depending on the authenticator used on the server side, it can be a user password or another type of user-specific secret. See the link:security/authentication[Authentication] topic for details.
+|None.
+
 |`SCHEMA`
 |Schema name.
 |`PUBLIC`
@@ -69,6 +91,12 @@
 DRIVER={Apache Ignite 3};ADDRESS=localhost:10800
 ----
 
+tab:Authentication[]
+[source,text]
+----
+DRIVER={Apache Ignite 3};ADDRESS=localhost:10800;IDENTITY=yourid;SECRET=yoursecret
+----
+
 tab:Custom page size[]
 [source,text]
 ----
diff --git a/docs/_docs/developers-guide/sql/odbc/index.adoc b/docs/_docs/developers-guide/sql/odbc/index.adoc
new file mode 100644
index 0000000..0389e81
--- /dev/null
+++ b/docs/_docs/developers-guide/sql/odbc/index.adoc
@@ -0,0 +1,20 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+---
+layout: toc
+---
+
+= ODBC Driver
+
diff --git a/docs/_docs/developers-guide/sql/odbc/odbc-driver.adoc b/docs/_docs/developers-guide/sql/odbc/odbc-driver.adoc
new file mode 100644
index 0000000..a76146a
--- /dev/null
+++ b/docs/_docs/developers-guide/sql/odbc/odbc-driver.adoc
@@ -0,0 +1,131 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= ODBC Driver
+
+== Overview
+
+Ignite 3 includes an ODBC driver that allows you both to select and to modify data stored in a distributed cache by using standard SQL queries and the native ODBC API. The ODBC driver uses your link:clients/overview[client connection configuration].
+
+The ODBC driver only provides thread safety at the connection level. This means that you should not access the same connection from multiple threads without additional synchronization, though you can create a separate connection for every thread and use them simultaneously.
+
+The ODBC driver implements version 3.8 of the ODBC API. For detailed information on ODBC, please refer to the link:https://msdn.microsoft.com/en-us/library/ms714177.aspx[ODBC Programmer's Reference].
+
+== Installing ODBC Driver
+
+To use the ODBC driver, register it in your system so that your ODBC driver manager can locate it.
+
+=== Installing on Windows
+
+
+==== Prerequisites
+
+Microsoft Visual C++ 2017 Redistributable Package should be installed first.
+
+==== Installation process
+
+Launch the provided installer and follow the instructions.
+
+=== Configuring the Cluster
+
+The ODBC driver uses the client connector to work with the cluster. Make sure the client connector port is set to the one you intend to use, for example:
+
+----
+node config update clientConnector.port=10469
+----
+
+For more information on configuring the client connector, see link:clients/overview#client-connector-configuration[Client Connector Configuration].
+
+=== Installing on Linux
+
+To build and install the ODBC driver on Linux, you first need to install
+an ODBC driver manager. The ODBC driver has been tested with link:http://www.unixodbc.org[UnixODBC].
+
+==== Prerequisites
+
+Install the following first:
+
+- the link:https://gcc.gnu.org/onlinedocs/libstdc%2B%2B[libstdc++] library supporting the C++14 standard;
+- link:http://www.unixodbc.org[UnixODBC] driver manager.
+
+==== Download from website
+
+You can get the built rpm or deb package from the provided website. Then, install the package locally to use it.
+
+== Supported Data Types
+
+The following SQL data types are supported:
+
+- `SQL_CHAR`
+- `SQL_VARCHAR`
+- `SQL_LONGVARCHAR`
+- `SQL_SMALLINT`
+- `SQL_INTEGER`
+- `SQL_FLOAT`
+- `SQL_DOUBLE`
+- `SQL_BIT`
+- `SQL_TINYINT`
+- `SQL_BIGINT`
+- `SQL_BINARY`
+- `SQL_VARBINARY`
+- `SQL_LONGVARBINARY`
+- `SQL_GUID`
+- `SQL_DECIMAL`
+- `SQL_TYPE_DATE`
+- `SQL_TYPE_TIMESTAMP`
+- `SQL_TYPE_TIME`
+
+== Using pyodbc
+
+Ignite can be used with link:https://pypi.org/project/pyodbc/[pyodbc]. Here is how you can use pyodbc in Ignite 3:
+
+- Install pyodbc
++
+[source,shell]
+----
+pip3 install pyodbc
+----
++
+- Import pyodbc to your project:
++
+[source,python]
+----
+import pyodbc
+----
++
+- Connect to the database:
++
+[source,python]
+----
+conn = pyodbc.connect('Driver={Apache Ignite 3};Address=127.0.0.1:10800;')
+----
++
+- Set encoding to UTF-8:
++
+[source,python]
+----
+conn.setencoding(encoding='utf-8')
+conn.setdecoding(sqltype=pyodbc.SQL_CHAR, encoding="utf-8")
+conn.setdecoding(sqltype=pyodbc.SQL_WCHAR, encoding="utf-8")
+----
++
+- Get data from your database:
++
+[source,python]
+----
+cursor = conn.cursor()
+cursor.execute('SELECT * FROM table_name')
+----
+
+For more information on using pyodbc, see the link:https://github.com/mkleehammer/pyodbc/wiki[official documentation].
\ No newline at end of file
diff --git a/docs/_docs/sql/odbc/querying-modifying-data.adoc b/docs/_docs/developers-guide/sql/odbc/querying-modifying-data.adoc
similarity index 91%
rename from docs/_docs/sql/odbc/querying-modifying-data.adoc
rename to docs/_docs/developers-guide/sql/odbc/querying-modifying-data.adoc
index 18f2425..22728ae 100644
--- a/docs/_docs/sql/odbc/querying-modifying-data.adoc
+++ b/docs/_docs/developers-guide/sql/odbc/querying-modifying-data.adoc
@@ -1,3 +1,17 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 = Querying and Modifying Data
 
 == Overview
diff --git a/docs/_docs/developers-guide/sql/odbc/specification.adoc b/docs/_docs/developers-guide/sql/odbc/specification.adoc
new file mode 100644
index 0000000..4845ac0
--- /dev/null
+++ b/docs/_docs/developers-guide/sql/odbc/specification.adoc
@@ -0,0 +1,972 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= ODBC Standard Conformance
+
+== Overview
+
+ODBC defines several interface conformance levels. In this section, you can find which features are supported by the Apache Ignite ODBC driver.
+
+== Core Interface Conformance
+
+[width="100%",cols="60%,10%,30%"]
+|=======================================================================
+|Feature |Supported|Comments
+
+|Allocate and free all types of handles, by calling `SQLAllocHandle` and `SQLFreeHandle`.
+|YES
+|
+
+|Use all forms of the `SQLFreeStmt` function.
+|YES
+|
+
+|Bind result set columns, by calling `SQLBindCol`.
+|YES
+|
+
+|Handle dynamic parameters, including arrays of parameters, in the input direction only, by calling `SQLBindParameter` and `SQLNumParams`.
+|YES
+|
+
+|Specify a bind offset.
+|YES
+|
+
+|Use the data-at-execution dialog, involving calls to `SQLParamData` and `SQLPutData`
+|YES
+|
+
+|Manage cursors and cursor names, by calling `SQLCloseCursor`, `SQLGetCursorName`, and `SQLSetCursorName`.
+|PARTIALLY
+|`SQLCloseCursor` is implemented. Named cursors are not supported by Ignite SQL.
+
+|Gain access to the description (metadata) of result sets, by calling `SQLColAttribute`, `SQLDescribeCol`, `SQLNumResultCols`, and `SQLRowCount`.
+|YES
+|
+
+|Query the data dictionary, by calling the catalog functions `SQLColumns`, `SQLGetTypeInfo`, `SQLStatistics`, and `SQLTables`.
+|PARTIALLY
+|`SQLStatistics` is not supported.
+
+|Manage data sources and connections, by calling `SQLConnect`, `SQLDataSources`, `SQLDisconnect`, and `SQLDriverConnect`. Obtain information on drivers, no matter which ODBC level they support, by calling `SQLDrivers`.
+|PARTIALLY
+|DSN support is not implemented.
+
+|Prepare and execute SQL statements, by calling `SQLExecDirect`, `SQLExecute`, and `SQLPrepare`.
+|YES
+|`SQLPrepare` is not supported on the SQL side.
+
+|Fetch one row of a result set or multiple rows, in the forward direction only, by calling `SQLFetch` or by calling `SQLFetchScroll` with the `FetchOrientation` argument set to `SQL_FETCH_NEXT`
+|YES
+|
+
+|Obtain an unbound column in parts, by calling `SQLGetData`.
+|YES
+|
+
+|Obtain current values of all attributes, by calling `SQLGetConnectAttr`, `SQLGetEnvAttr`, and `SQLGetStmtAttr`, and set all attributes to their default values and set certain attributes to non-default values by calling `SQLSetConnectAttr`, `SQLSetEnvAttr`, and `SQLSetStmtAttr`.
+|PARTIALLY
+|Not all attributes are currently supported. See table below for details.
+
+|Manipulate certain fields of descriptors, by calling `SQLCopyDesc`, `SQLGetDescField`, `SQLGetDescRec`, `SQLSetDescField`, and `SQLSetDescRec`.
+|NO
+|
+
+|Obtain diagnostic information, by calling `SQLGetDiagField` and `SQLGetDiagRec`.
+|YES
+|
+
+|Detect driver capabilities, by calling `SQLGetFunctions` and `SQLGetInfo`. Also, detect the result of any text substitutions made to an SQL statement before it is sent to the data source, by calling `SQLNativeSql`.
+|YES
+|
+
+|Use the syntax of `SQLEndTran` to commit a transaction. A Core-level driver need not support true transactions; therefore, the application cannot specify `SQL_ROLLBACK` nor `SQL_AUTOCOMMIT_OFF` for the `SQL_ATTR_AUTOCOMMIT` connection attribute.
+|YES
+|
+
+|Call `SQLCancel` to cancel the data-at-execution dialog and, in multi-thread environments, to cancel an ODBC function executing in another thread. Core-level interface conformance does not mandate support for asynchronous execution of functions, nor the use of `SQLCancel` to cancel an ODBC function executing asynchronously. Neither the platform nor the ODBC driver need be multi-thread for the driver to conduct independent activities at the same time. However, in multi-thread environments, the ODBC driver must be thread-safe. Serialization of requests from the application is a conformant way to implement this specification, even though it might create serious performance problems.
+|NO
+|
+
+|Obtain the `SQL_BEST_ROWID` row-identifying column of tables, by calling `SQLSpecialColumns`.
+|PARTIALLY
+|Current implementation always returns an empty row set.
+
+|=======================================================================
+
+
+== Level 1 Interface Conformance
+[width="100%",cols="60%,10%,30%"]
+|=======================================================================
+|Feature|Supported|Comments
+
+|Specify the schema of database tables and views (using two-part naming).
+|YES
+|
+
+|Invoke true asynchronous execution of ODBC functions, where applicable ODBC functions are all synchronous or all asynchronous on a given connection.
+|NO
+|
+
+|Use scrollable cursors, and thereby achieve access to a result set in methods other than forward-only, by calling `SQLFetchScroll` with the `FetchOrientation` argument other than `SQL_FETCH_NEXT`.
+|NO
+|
+
+|Obtain primary keys of tables, by calling `SQLPrimaryKeys`.
+|YES
+|
+
+|Use stored procedures, through the ODBC escape sequence for procedure calls, and query the data dictionary regarding stored procedures, by calling `SQLProcedureColumns` and `SQLProcedures`.
+|NO
+|
+
+|Connect to a data source by interactively browsing the available servers, by calling `SQLBrowseConnect`.
+|NO
+|
+
+|Use ODBC functions instead of SQL statements to perform certain database operations: `SQLSetPos` with `SQL_POSITION` and `SQL_REFRESH`.
+|NO
+|
+
+|Gain access to the contents of multiple result sets generated by batches and stored procedures, by calling `SQLMoreResults`.
+|YES
+|
+
+|Delimit transactions spanning several ODBC functions, with true atomicity and the ability to specify `SQL_ROLLBACK` in `SQLEndTran`.
+|YES
+|
+|=======================================================================
+
+== Level 2 Interface Conformance
+[width="100%",cols="60%,10%,30%"]
+|=======================================================================
+|Feature|Supported|Comments
+
+|Use three-part names of database tables and views.
+|NO
+|Ignite SQL does not support catalogs.
+
+|Describe dynamic parameters, by calling `SQLDescribeParam`.
+|NO
+|
+
+|Use not only input parameters but also output and input/output parameters, and result values of stored procedures.
+|NO
+|Ignite SQL does not support output parameters
+
+|Use bookmarks, including retrieving bookmarks, by calling `SQLDescribeCol` and `SQLColAttribute` on column number 0; fetching based on a bookmark, by calling `SQLFetchScroll` with the `FetchOrientation` argument set to `SQL_FETCH_BOOKMARK`; and update, delete, and fetch by bookmark operations, by calling `SQLBulkOperations` with the Operation argument set to `SQL_UPDATE_BY_BOOKMARK`, `SQL_DELETE_BY_BOOKMARK`, or `SQL_FETCH_BY_BOOKMARK`.
+|NO
+|Ignite SQL does not support bookmarks.
+
+|Retrieve advanced information about the data dictionary, by calling `SQLColumnPrivileges`, `SQLForeignKeys`, and `SQLTablePrivileges`.
+|PARTIALLY
+|`SQLForeignKeys` is implemented, but returns an empty result set.
+
+|Use ODBC functions instead of SQL statements to perform additional database operations, by calling `SQLBulkOperations` with `SQL_ADD`, or `SQLSetPos` with `SQL_DELETE` or `SQL_UPDATE`.
+|NO
+|
+
+|Enable asynchronous execution of ODBC functions for specified individual statements.
+|NO
+|
+
+|Obtain the `SQL_ROWVER` row-identifying column of tables, by calling `SQLSpecialColumns`.
+|NO
+|
+
+|Set the `SQL_ATTR_CONCURRENCY` statement attribute to at least one value other than `SQL_CONCUR_READ_ONLY`.
+|NO
+|
+
+|The ability to time out login request and SQL queries (`SQL_ATTR_LOGIN_TIMEOUT` and `SQL_ATTR_QUERY_TIMEOUT`).
+|YES
+|
+
+|The ability to change the default isolation level; the ability to execute transactions with the "serializable" level of isolation.
+|NO
+|
+|=======================================================================
+
+== Function Conformance
+[width="100%",cols="70%,15%,15%"]
+|=======================================================================
+|Function|Supported|Conformance level
+
+|`SQLAllocHandle`
+|YES
+|Core
+
+|`SQLBindCol`
+|YES
+|Core
+
+|`SQLBindParameter`
+|YES
+|Core
+
+|`SQLBrowseConnect`
+|NO
+|Level 1
+
+|`SQLBulkOperations`
+|NO
+|Level 1
+
+|`SQLCancel`
+|NO
+|Core
+
+|`SQLCloseCursor`
+|YES
+|Core
+
+|`SQLColAttribute`
+|YES
+|Core
+
+|`SQLColumnPrivileges`
+|NO
+|Level 2
+
+|`SQLColumns`
+|YES
+|Core
+
+|`SQLConnect`
+|YES
+|Core
+
+|`SQLCopyDesc`
+|NO
+|Core
+
+|`SQLDataSources`
+|N/A
+|Core
+
+|`SQLDescribeCol`
+|YES
+|Core
+
+|`SQLDescribeParam`
+|NO
+|Level 2
+
+|`SQLDisconnect`
+|YES
+|Core
+
+|`SQLDriverConnect`
+|YES
+|Core
+
+|`SQLDrivers`
+|N/A
+|Core
+
+|`SQLEndTran`
+|PARTIALLY
+|Core
+
+|`SQLExecDirect`
+|YES
+|Core
+
+|`SQLExecute`
+|YES
+|Core
+
+|`SQLFetch`
+|YES
+|Core
+
+|`SQLFetchScroll`
+|YES
+|Core
+
+|`SQLForeignKeys`
+|PARTIALLY
+|Level 2
+
+|`SQLFreeHandle`
+|YES
+|Core
+
+|`SQLFreeStmt`
+|YES
+|Core
+
+|`SQLGetConnectAttr`
+|PARTIALLY
+|Core
+
+|`SQLGetCursorName`
+|NO
+|Core
+
+|`SQLGetData`
+|YES
+|Core
+
+|`SQLGetDescField`
+|NO
+|Core
+
+|`SQLGetDescRec`
+|NO
+|Core
+
+|`SQLGetDiagField`
+|YES
+|Core
+
+|`SQLGetDiagRec`
+|YES
+|Core
+
+|`SQLGetEnvAttr`
+|PARTIALLY
+|Core
+
+|`SQLGetFunctions`
+|NO
+|Core
+
+|`SQLGetInfo`
+|YES
+|Core
+
+|`SQLGetStmtAttr`
+|PARTIALLY
+|Core
+
+|`SQLGetTypeInfo`
+|YES
+|Core
+
+|`SQLMoreResults`
+|YES
+|Level 1
+
+|`SQLNativeSql`
+|YES
+|Core
+
+|`SQLNumParams`
+|YES
+|Core
+
+|`SQLNumResultCols`
+|YES
+|Core
+
+|`SQLParamData`
+|YES
+|Core
+
+|`SQLPrepare`
+|YES
+|Core
+
+|`SQLPrimaryKeys`
+|YES
+|Level 1
+
+|`SQLProcedureColumns`
+|NO
+|Level 1
+
+|`SQLProcedures`
+|NO
+|Level 1
+
+|`SQLPutData`
+|YES
+|Core
+
+|`SQLRowCount`
+|YES
+|Core
+
+|`SQLSetConnectAttr`
+|PARTIALLY
+|Core
+
+|`SQLSetCursorName`
+|NO
+|Core
+
+|`SQLSetDescField`
+|NO
+|Core
+
+|`SQLSetDescRec`
+|NO
+|Core
+
+|`SQLSetEnvAttr`
+|PARTIALLY
+|Core
+
+|`SQLSetPos`
+|NO
+|Level 1
+
+|`SQLSetStmtAttr`
+|PARTIALLY
+|Core
+
+|`SQLSpecialColumns`
+|PARTIALLY
+|Core
+
+|`SQLStatistics`
+|NO
+|Core
+
+|`SQLTablePrivileges`
+|NO
+|Level 2
+
+|`SQLTables`
+|YES
+|Core
+|=======================================================================
+
+== Environment Attribute Conformance
+[width="100%",cols="70%,15%,15%"]
+|=======================================================================
+|Feature|Supported|Conformance Level
+
+|`SQL_ATTR_CONNECTION_POOLING`
+|NO
+|Optional
+
+|`SQL_ATTR_CP_MATCH`
+|NO
+|Optional
+
+|`SQL_ATTR_ODBC_VER`
+|YES
+|Core
+
+|`SQL_ATTR_OUTPUT_NTS`
+|YES
+|Optional
+|=======================================================================
+
+== Connection Attribute Conformance
+[width="100%",cols="70%,15%,15%"]
+|=======================================================================
+|Feature|Supported|Conformance Level
+
+|`SQL_ATTR_ACCESS_MODE`
+|NO
+|Core
+
+|`SQL_ATTR_ASYNC_ENABLE`
+|NO
+|Level 1 / Level 2
+
+|`SQL_ATTR_AUTO_IPD`
+|NO
+|Level 2
+
+|`SQL_ATTR_AUTOCOMMIT`
+|YES
+|Level 1
+
+|`SQL_ATTR_CONNECTION_DEAD`
+|YES
+|Level 1
+
+|`SQL_ATTR_CONNECTION_TIMEOUT`
+|YES
+|Level 2
+
+|`SQL_ATTR_CURRENT_CATALOG`
+|NO
+|Level 2
+
+|`SQL_ATTR_LOGIN_TIMEOUT`
+|YES
+|Level 2
+
+|`SQL_ATTR_ODBC_CURSORS`
+|NO
+|Core
+
+|`SQL_ATTR_PACKET_SIZE`
+|NO
+|Level 2
+
+|`SQL_ATTR_QUIET_MODE`
+|NO
+|Core
+
+|`SQL_ATTR_TRACE`
+|NO
+|Core
+
+|`SQL_ATTR_TRACEFILE`
+|NO
+|Core
+
+|`SQL_ATTR_TRANSLATE_LIB`
+|NO
+|Core
+
+|`SQL_ATTR_TRANSLATE_OPTION`
+|NO
+|Core
+
+|`SQL_ATTR_TXN_ISOLATION`
+|NO
+|Level 1 / Level 2
+|=======================================================================
+
+== Statement Attribute Conformance
+[width="100%",cols="70%,15%,15%"]
+|=======================================================================
+|Feature|Supported|Conformance Level
+
+|`SQL_ATTR_APP_PARAM_DESC`
+|PARTIALLY
+|Core
+
+|`SQL_ATTR_APP_ROW_DESC`
+|PARTIALLY
+|Core
+
+|`SQL_ATTR_ASYNC_ENABLE`
+|NO
+|Level 1/ Level 2
+
+|`SQL_ATTR_CONCURRENCY`
+|NO
+|Level 1 / Level 2
+
+|`SQL_ATTR_CURSOR_SCROLLABLE`
+|NO
+|Level 1
+
+|`SQL_ATTR_CURSOR_SENSITIVITY`
+|NO
+|Level 2
+
+|`SQL_ATTR_CURSOR_TYPE`
+|NO
+|Level 1 / Level 2
+
+|`SQL_ATTR_ENABLE_AUTO_IPD`
+|NO
+|Level 2
+
+|`SQL_ATTR_FETCH_BOOKMARK_PTR`
+|NO
+|Level 2
+
+|`SQL_ATTR_IMP_PARAM_DESC`
+|PARTIALLY
+|Core
+
+|`SQL_ATTR_IMP_ROW_DESC`
+|PARTIALLY
+|Core
+
+|`SQL_ATTR_KEYSET_SIZE`
+|NO
+|Level 2
+
+|`SQL_ATTR_MAX_LENGTH`
+|NO
+|Level 1
+
+|`SQL_ATTR_MAX_ROWS`
+|NO
+|Level 1
+
+|`SQL_ATTR_METADATA_ID`
+|NO
+|Core
+
+|`SQL_ATTR_NOSCAN`
+|NO
+|Core
+
+|`SQL_ATTR_PARAM_BIND_OFFSET_PTR`
+|YES
+|Core
+
+|`SQL_ATTR_PARAM_BIND_TYPE`
+|NO
+|Core
+
+|`SQL_ATTR_PARAM_OPERATION_PTR`
+|NO
+|Core
+
+|`SQL_ATTR_PARAM_STATUS_PTR`
+|YES
+|Core
+
+|`SQL_ATTR_PARAMS_PROCESSED_PTR`
+|YES
+|Core
+
+|`SQL_ATTR_PARAMSET_SIZE`
+|YES
+|Core
+
+|`SQL_ATTR_QUERY_TIMEOUT`
+|YES
+|Level 2
+
+|`SQL_ATTR_RETRIEVE_DATA`
+|NO
+|Level 1
+
+|`SQL_ATTR_ROW_ARRAY_SIZE`
+|YES
+|Core
+
+|`SQL_ATTR_ROW_BIND_OFFSET_PTR`
+|YES
+|Core
+
+|`SQL_ATTR_ROW_BIND_TYPE`
+|YES
+|Core
+
+|`SQL_ATTR_ROW_NUMBER`
+|NO
+|Level 1
+
+|`SQL_ATTR_ROW_OPERATION_PTR`
+|NO
+|Level 1
+
+|`SQL_ATTR_ROW_STATUS_PTR`
+|YES
+|Core
+
+|`SQL_ATTR_ROWS_FETCHED_PTR`
+|YES
+|Core
+
+|`SQL_ATTR_SIMULATE_CURSOR`
+|NO
+|Level 2
+
+|`SQL_ATTR_USE_BOOKMARKS`
+|NO
+|Level 2
+|=======================================================================
+
+== Descriptor Header Fields Conformance
+[width="100%",cols="70%,15%,15%"]
+|=======================================================================
+|Feature|Supported|Conformance Level
+
+|`SQL_DESC_ALLOC_TYPE`
+|NO
+|Core
+
+|`SQL_DESC_ARRAY_SIZE`
+|NO
+|Core
+
+|`SQL_DESC_ARRAY_STATUS_PTR`
+|NO
+|Core / Level 1
+
+|`SQL_DESC_BIND_OFFSET_PTR`
+|NO
+|Core
+
+|`SQL_DESC_BIND_TYPE`
+|NO
+|Core
+
+|`SQL_DESC_COUNT`
+|NO
+|Core
+
+|`SQL_DESC_ROWS_PROCESSED_PTR`
+|NO
+|Core
+|=======================================================================
+
+== Descriptor Record Fields Conformance
+[width="100%",cols="70%,15%,15%"]
+|=======================================================================
+|Feature|Supported|Conformance Level
+
+|`SQL_DESC_AUTO_UNIQUE_VALUE`
+|NO
+|Level 2
+
+|`SQL_DESC_BASE_COLUMN_NAME`
+|NO
+|Core
+
+|`SQL_DESC_BASE_TABLE_NAME`
+|NO
+|Level 1
+
+|`SQL_DESC_CASE_SENSITIVE`
+|NO
+|Core
+
+|`SQL_DESC_CATALOG_NAME`
+|NO
+|Level 2
+
+|`SQL_DESC_CONCISE_TYPE`
+|NO
+|Core
+
+|`SQL_DESC_DATA_PTR`
+|NO
+|Core
+
+|`SQL_DESC_DATETIME_INTERVAL_CODE`
+|NO
+|Core
+
+|`SQL_DESC_DATETIME_INTERVAL_PRECISION`
+|NO
+|Core
+
+|`SQL_DESC_DISPLAY_SIZE`
+|NO
+|Core
+
+|`SQL_DESC_FIXED_PREC_SCALE`
+|NO
+|Core
+
+|`SQL_DESC_INDICATOR_PTR`
+|NO
+|Core
+
+|`SQL_DESC_LABEL`
+|NO
+|Level 2
+
+|`SQL_DESC_LENGTH`
+|NO
+|Core
+
+|`SQL_DESC_LITERAL_PREFIX`
+|NO
+|Core
+
+|`SQL_DESC_LITERAL_SUFFIX`
+|NO
+|Core
+
+|`SQL_DESC_LOCAL_TYPE_NAME`
+|NO
+|Core
+
+|`SQL_DESC_NAME`
+|NO
+|Core
+
+|`SQL_DESC_NULLABLE`
+|NO
+|Core
+
+|`SQL_DESC_OCTET_LENGTH`
+|NO
+|Core
+
+|`SQL_DESC_OCTET_LENGTH_PTR`
+|NO
+|Core
+
+|`SQL_DESC_PARAMETER_TYPE`
+|NO
+|Core / Level 2
+
+|`SQL_DESC_PRECISION`
+|NO
+|Core
+
+|`SQL_DESC_ROWVER`
+|NO
+|Level 1
+
+|`SQL_DESC_SCALE`
+|NO
+|Core
+
+|`SQL_DESC_SCHEMA_NAME`
+|NO
+|Level 1
+
+|`SQL_DESC_SEARCHABLE`
+|NO
+|Core
+
+|`SQL_DESC_TABLE_NAME`
+|NO
+|Level 1
+
+|`SQL_DESC_TYPE`
+|NO
+|Core
+
+|`SQL_DESC_TYPE_NAME`
+|NO
+|Core
+
+|`SQL_DESC_UNNAMED`
+|NO
+|Core
+
+|`SQL_DESC_UNSIGNED`
+|NO
+|Core
+
+|`SQL_DESC_UPDATABLE`
+|NO
+|Core
+
+|=======================================================================
+
+== SQL Data Types
+
+The following SQL data types listed in the link:https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/sql-data-types[specification] are supported:
+
+[width="100%",cols="80%,20%"]
+|=======================================================================
+|Data Type |Supported
+
+|`SQL_CHAR`
+|YES
+
+|`SQL_VARCHAR`
+|YES
+
+|`SQL_LONGVARCHAR`
+|YES
+
+|`SQL_WCHAR`
+|NO
+
+|`SQL_WVARCHAR`
+|NO
+
+|`SQL_WLONGVARCHAR`
+|NO
+
+|`SQL_DECIMAL`
+|YES
+
+|`SQL_NUMERIC`
+|NO
+
+|`SQL_SMALLINT`
+|YES
+
+|`SQL_INTEGER`
+|YES
+
+|`SQL_REAL`
+|NO
+
+|`SQL_FLOAT`
+|YES
+
+|`SQL_DOUBLE`
+|YES
+
+|`SQL_BIT`
+|YES
+
+|`SQL_TINYINT`
+|YES
+
+|`SQL_BIGINT`
+|YES
+
+|`SQL_BINARY`
+|YES
+
+|`SQL_VARBINARY`
+|YES
+
+|`SQL_LONGVARBINARY`
+|YES
+
+|`SQL_TYPE_DATE`
+|YES
+
+|`SQL_TYPE_TIME`
+|YES
+
+|`SQL_TYPE_TIMESTAMP`
+|YES
+
+|`SQL_TYPE_UTCDATETIME`
+|NO
+
+|`SQL_TYPE_UTCTIME`
+|NO
+
+|`SQL_INTERVAL_MONTH`
+|NO
+
+|`SQL_INTERVAL_YEAR`
+|NO
+
+|`SQL_INTERVAL_YEAR_TO_MONTH`
+|NO
+
+|`SQL_INTERVAL_DAY`
+|NO
+
+|`SQL_INTERVAL_HOUR`
+|NO
+
+|`SQL_INTERVAL_MINUTE`
+|NO
+
+|`SQL_INTERVAL_SECOND`
+|NO
+
+|`SQL_INTERVAL_DAY_TO_HOUR`
+|NO
+
+|`SQL_INTERVAL_DAY_TO_MINUTE`
+|NO
+
+|`SQL_INTERVAL_DAY_TO_SECOND`
+|NO
+
+|`SQL_INTERVAL_HOUR_TO_MINUTE`
+|NO
+
+|`SQL_INTERVAL_HOUR_TO_SECOND`
+|NO
+
+|`SQL_INTERVAL_MINUTE_TO_SECOND`
+|NO
+
+|`SQL_GUID`
+|YES
+|=======================================================================
\ No newline at end of file
diff --git a/docs/_docs/developers-guide/sql/system-views.adoc b/docs/_docs/developers-guide/sql/system-views.adoc
new file mode 100644
index 0000000..f1405fa
--- /dev/null
+++ b/docs/_docs/developers-guide/sql/system-views.adoc
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= System Views
+
+Ignite provides a number of built-in SQL views that expose the cluster's state and offer real-time insight into the status of its components. These views are available in the `SYSTEM` schema.
+
+== Getting Data
+
+You access system views by selecting data from them with SQL, just as you would from any other table. For example, you can get a list of all available system views in the following way:
+
+[source, sql]
+----
+SELECT id, schema, name FROM system.system_views WHERE type = 'NODE'
+----
+
+You can also use joins to combine data from multiple views. The example below returns all columns of a view that was found in the `SYSTEM_VIEWS` view:
+
+[source, sql]
+----
+SELECT svc.*
+  FROM system.system_view_columns svc
+  JOIN system.system_views sv ON svc.view_id = sv.id
+ WHERE sv.name = 'SYSTEM_VIEWS'
+----
+
+== Available Views
+
+=== SYSTEM_VIEWS
+
+Describes available system views.
+
+[width="100%", cols="15%a, 15%a, 60%a",opts="header"]
+|=======
+|Column	|Data Type| Description
+
+| ID | INT32 | System view ID.
+| SCHEMA | STRING | Name of the schema used. Default is `SYSTEM`.
+| NAME | STRING | System view name.
+| TYPE | STRING | System view type. Possible values:
+
+* NODE - The view provides node-specific information. Data will be collected from all nodes, and represented in the view.
+* CLUSTER - The view provides cluster-wide information. Data will be collected from one node, chosen to represent the cluster.
+
+|=======
+
+=== SYSTEM_VIEW_COLUMNS
+
+Describes available system view columns.
+
+[width="100%", cols="15%a, 15%a, 60%a",opts="header"]
+|=======
+|Column	|Data Type| Description
+
+| VIEW_ID | INT32 | System view ID.
+| NAME | STRING | Column name.
+| TYPE | STRING | Column type. Can be any of the link:sql-reference/data-types[supported types].
+| NULLABLE | BOOLEAN | Defines whether the column can hold `NULL` values.
+| PRECISION | INT32 | Maximum number of digits.
+| SCALE | INT32 | Maximum number of decimal places.
+| LENGTH | INT32 | Maximum length of the value: characters for string values or bytes for binary values.
+
+|=======
+
+=== SYSTEM.ZONES
+
+[width="100%", cols="15%a, 15%a, 60%a",opts="header"]
+|=======
+|Column	|Data Type| Description
+
+| NAME | STRING | The name of the distribution zone.
+| PARTITIONS | INT32 | The number of partitions in the distribution zone.
+| REPLICAS | STRING | The number of copies of each partition in the distribution zone.
+| DATA_NODES_AUTO_ADJUST_SCALE_UP | INT32 | The delay in seconds between the new node joining and the start of data zone adjustment.
+| DATA_NODES_AUTO_ADJUST_SCALE_DOWN | INT32 | The delay in seconds between the node leaving the cluster and the start of data zone adjustment.
+| DATA_NODES_FILTER | STRING | The filter that specifies what nodes will be used by the distribution zone.
+| IS_DEFAULT_ZONE | BOOLEAN | Defines if the data zone is used by default.
+
+|=======
\ No newline at end of file
diff --git a/docs/_docs/developers-guide/table-api.adoc b/docs/_docs/developers-guide/table-api.adoc
new file mode 100644
index 0000000..f7ae68d
--- /dev/null
+++ b/docs/_docs/developers-guide/table-api.adoc
@@ -0,0 +1,396 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Table API
+
+To execute operations on a specific table, you need to get a view of the table and use one of its methods. New tables can only be created through the SQL API.
+
+Ignite supports mapping user objects to table tuples. This ensures that objects created in any programming language can be used for key-value operations directly.
+
+== Getting a Table Instance
+
+First, get an instance of the table. To obtain it, use the `IgniteTables.table(String)` method. You can also use the `IgniteTables.tables()` method to list all existing tables.
+
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+IgniteTables tableApi = client.tables();
+List<Table> existingTables = tableApi.tables();
+Table firstTable = existingTables.get(0);
+
+Table myTable = tableApi.table("MY_TABLE");
+----
+
+tab:.NET[]
+[source, csharp]
+----
+var existingTables = await Client.Tables.GetTablesAsync();
+var firstTable = existingTables[0];
+
+var myTable = await Client.Tables.GetTableAsync("MY_TABLE");
+----
+
+tab:C++[]
+[source, cpp]
+----
+using namespace ignite;
+
+auto table_api = client.get_tables();
+std::vector<table> existing_tables = table_api.get_tables();
+table first_table = existing_tables.front();
+
+std::optional<table> my_table = table_api.get_table("MY_TABLE");
+----
+--
+
+== Basic Table Operations
+
+Once you have obtained a table, get a specific view of it to choose how you want to operate on its records.
+
+=== Binary Record View
+
+A binary record view. It can be used to operate on table tuples directly.
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+RecordView<Tuple> view = table.recordView();
+
+Tuple fullRecord = Tuple.create()
+  .set("id", 42)
+  .set("name", "John Doe");
+
+view.upsert(null, fullRecord);
+
+Tuple keyRecord = Tuple.create().set("id", 42);
+
+Tuple resRecord = view.get(null, keyRecord);
+
+assert resRecord.columnCount() == 2;
+assert resRecord.intValue("id") == 42;
+assert resRecord.stringValue("name").equals("John Doe");
+----
+
+tab:.NET[]
+[source, csharp]
+----
+IRecordView<IIgniteTuple> view = table.RecordBinaryView;
+
+IIgniteTuple fullRecord = new IgniteTuple
+{
+  ["id"] = 42,
+  ["name"] = "John Doe"
+};
+
+await view.UpsertAsync(transaction: null, fullRecord);
+
+IIgniteTuple keyRecord = new IgniteTuple { ["id"] = 42 };
+(IIgniteTuple value, bool hasValue) = await view.GetAsync(transaction: null, keyRecord);
+
+Debug.Assert(hasValue);
+Debug.Assert(value.FieldCount == 2);
+Debug.Assert(value["id"] as int? == 42);
+Debug.Assert(value["name"] as string == "John Doe");
+----
+
+tab:C++[]
+[source, cpp]
+----
+record_view<ignite_tuple> view = table.get_record_binary_view();
+
+ignite_tuple record{
+  {"id", 42},
+  {"name", "John Doe"}
+};
+
+view.upsert(nullptr, record);
+std::optional<ignite_tuple> res_record = view.get(nullptr, {"id", 42});
+
+assert(res_record.has_value());
+assert(res_record->column_count() == 2);
+assert(res_record->get<std::int64_t>("id") == 42);
+assert(res_record->get<std::string>("name") == "John Doe");
+----
+
+--
+
+=== Record View
+
+A record view mapped to a user type. It can be used to operate on the table using user objects that are mapped to table tuples. A sketch of the `Pojo` class used in the Java example is shown after the code block below.
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+RecordView<Pojo> pojoView = table.recordView(Mapper.of(Pojo.class));
+
+pojoView.upsert(null, new Pojo(42, "John Doe"));
+Pojo resRecord = pojoView.get(null, new Pojo(42));
+
+assert resRecord.id == 42;
+assert resRecord.name.equals("John Doe");
+----
+
+tab:.NET[]
+[source, csharp]
+----
+var pocoView = table.GetRecordView<Poco>();
+
+await pocoView.UpsertAsync(transaction: null, new Poco(42, "John Doe"));
+var (value, hasValue) = await pocoView.GetAsync(transaction: null, new Poco(42));
+
+Debug.Assert(hasValue);
+Debug.Assert(value.Name == "John Doe");
+
+public record Poco(long Id, string? Name = null);
+----
+
+tab:C++[]
+[source, cpp]
+----
+record_view<person> view = table.get_record_view<person>();
+
+person record(42, "John Doe");
+
+view.upsert(nullptr, record);
+std::optional<person> res_record = view.get(nullptr, person{42});
+
+assert(res_record.has_value());
+assert(res_record->id == 42);
+assert(res_record->name == "John Doe");
+----
+
+--
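+
+The Java examples on this page reference a `Pojo` class whose fields are mapped to the table columns by name. The class itself is not shown in the examples; here is a minimal sketch of what it might look like (the field and constructor set is an assumption chosen to match the snippets on this page):
+
+[source, java]
+----
+// Hypothetical mapped class used by the Java examples on this page.
+// Field names are assumed to match the ID and NAME columns of the table.
+public class Pojo {
+    public int id;
+    public String name;
+
+    public Pojo() {
+        // A no-argument constructor is typically required for mapping.
+    }
+
+    public Pojo(int id) {
+        this.id = id;
+    }
+
+    public Pojo(String name) {
+        this.name = name;
+    }
+
+    public Pojo(int id, String name) {
+        this.id = id;
+        this.name = name;
+    }
+}
+----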
+
+=== Key-Value Binary View
+
+A binary key-value view. It can be used to operate on the table using key and value tuples separately.
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+KeyValueView<Tuple, Tuple> kvView = table.keyValueView();
+
+Tuple key = Tuple.create().set("id", 42);
+Tuple val = Tuple.create().set("name", "John Doe");
+
+kvView.put(null, key, val);
+Tuple res = kvView.get(null, key);
+
+assert res.columnCount() == 1;
+assert res.stringValue("name").equals("John Doe");
+----
+
+tab:.NET[]
+[source, csharp]
+----
+IKeyValueView<IIgniteTuple, IIgniteTuple> kvView = table.KeyValueBinaryView;
+
+IIgniteTuple key = new IgniteTuple { ["id"] = 42 };
+IIgniteTuple val = new IgniteTuple { ["name"] = "John Doe" };
+
+await kvView.PutAsync(transaction: null, key, val);
+(IIgniteTuple? value, bool hasValue) = await kvView.GetAsync(transaction: null, key);
+
+Debug.Assert(hasValue);
+Debug.Assert(value.FieldCount == 1);
+Debug.Assert(value["name"] as string == "John Doe");
+----
+
+tab:C++[]
+[source, cpp]
+----
+key_value_view<ignite_tuple, ignite_tuple> kv_view = table.get_key_value_binary_view();
+
+ignite_tuple key_tuple{{"id", 42}};
+ignite_tuple val_tuple{{"name", "John Doe"}};
+
+kv_view.put(nullptr, key_tuple, val_tuple);
+std::optional<ignite_tuple> res_tuple = kv_view.get(nullptr, key_tuple);
+
+assert(res_tuple.has_value());
+assert(res_tuple->column_count() == 2);
+assert(res_tuple->get<std::int64_t>("id") == 42);
+assert(res_tuple->get<std::string>("name") == "John Doe");
+----
+
+--
+
+
+=== Key-Value View
+
+A key-value view with user objects. It can be used to operate on the table using key and value user objects mapped to table tuples.
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+KeyValueView<Long, Pojo> pojoView =
+  table.keyValueView(Mapper.of(Long.class), Mapper.of(Pojo.class));
+
+pojoView.put(null, 42L, new Pojo("John Doe"));
+Pojo val = pojoView.get(null, 42L);
+
+assert val.name.equals("John Doe");
+----
+
+tab:.NET[]
+[source, csharp]
+----
+IKeyValueView<long, Poco> kvView = table.GetKeyValueView<long, Poco>();
+
+await kvView.PutAsync(transaction: null, 42, new Poco(Id: 0, Name: "John Doe"));
+(Poco? value, bool hasValue) = await kvView.GetAsync(transaction: null, 42);
+
+Debug.Assert(hasValue);
+Debug.Assert(value.Name == "John Doe");
+
+public record Poco(long Id, string? Name = null);
+----
+
+tab:C++[]
+[source, cpp]
+----
+key_value_view<person, person> kv_view = table.get_key_value_view<person, person>();
+
+kv_view.put(nullptr, {42}, {"John Doe"});
+std::optional<person> res = kv_view.get(nullptr, {42});
+
+assert(res.has_value());
+assert(res->id == 42);
+assert(res->name == "John Doe");
+----
+--
+
+
+== Criterion Queries
+
+Ignite 3 provides criterion queries that can be used to retrieve data from tables. Criterion queries work with any type of view and return data in the form appropriate to that view.
+
+The example below shows how you can execute a query within an implicit transaction:
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+try (Cursor<Entry<Tuple, Tuple>> cursor = kvView().query(
+        null,
+        and(columnValue("City", equalTo("New York")), columnValue("Salary", greaterThan(10000)))
+)) {
+    // ...
+}
+----
+--
+
+Queries are executed with the `query()` method, with the comparison criteria provided through the `columnValue` method.
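+
+The `and`, `columnValue`, `equalTo`, and `greaterThan` helpers used above are static factory methods of the criteria API. Below is a minimal sketch of the imports such an example relies on; the package names are assumptions and may differ between client versions:
+
+[source, java]
+----
+// Assumed package names for the criteria helpers and supporting types.
+import static org.apache.ignite.table.criteria.Criteria.and;
+import static org.apache.ignite.table.criteria.Criteria.columnValue;
+import static org.apache.ignite.table.criteria.Criteria.equalTo;
+import static org.apache.ignite.table.criteria.Criteria.greaterThan;
+
+import java.util.Map.Entry;
+
+import org.apache.ignite.lang.Cursor;
+import org.apache.ignite.table.Tuple;
+----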
+
+You can also pass a specific transaction to execute the query as part of that transaction.
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+var tx = client.transactions().begin();
+
+try (Cursor<Entry<Tuple, Tuple>> cursor = kvView().query(
+        tx,
+        and(columnValue("City", equalTo("New York")), columnValue("Salary", greaterThan(10000)))
+)) {
+    // ...
+}
+
+tx.commit();
+----
+--
+
+=== Asynchronous Queries
+
+You can also perform the query asynchronously by using the `queryAsync` method. This way the query is executed without blocking the thread. For example, you can execute the above query asynchronously:
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+view.queryAsync(null, and(columnValue("City", equalTo("New York")), columnValue("Salary", greaterThan(10000))))
+    .thenCompose(this::fetchAllRowsInto)
+    .join();
+----
+--
+
+This operation uses the `thenCompose()` method to handle the query results asynchronously in the user-defined `fetchAllRowsInto()` method. Here is what this method might look like:
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+private CompletionStage<Void> fetchAllRowsInto(AsyncCursor<Entry<Tuple, Tuple>> cursor) {
+    // Process the current page.
+    for (var row : cursor.currentPage()) {
+       // ...
+    }
+    // Finish processing if no more data is currently available.
+    if (!cursor.hasMorePages()) {
+        return CompletableFuture.completedFuture(null);
+    }
+    // Request for the next page, then subscribe to the response.
+    return cursor.fetchNextPage().thenCompose(this::fetchAllRowsInto);
+}
+----
+--
+
+
+=== Comparison Expressions
+
+The following expressions are supported in criterion queries:
+
+[cols="15%,60%,25%",opts="header"]
+|======
+|Expression|Description|Example
+|`equalTo`|Checks if the object is equal to the value.|`columnValue("City", equalTo("New York"))`
+|`notEqualTo`|Checks if the object is not equal to the value.|`columnValue("City", notEqualTo("New York"))`
+|`greaterThan`|Checks if the object is greater than the value.|`columnValue("Salary", greaterThan(10000))`
+|`greaterThanOrEqualTo`|Checks if the object is greater than or equal to the value.|`columnValue("Salary", greaterThanOrEqualTo(10000))`
+|`lessThan`|Checks if the object is less than the value.|`columnValue("Salary", lessThan(10000))`
+|`lessThanOrEqualTo`|Checks if the object is less than or equal to the value.|`columnValue("Salary", lessThanOrEqualTo(10000))`
+|`nullValue`|Checks if the object is null.|`columnValue("City", nullValue())`
+|`notNullValue`|Checks if the object is not null.|`columnValue("City", notNullValue())`
+|`in`|Checks if the object is in the collection.|`columnValue("City", in("New York", "Washington"))`
+|`notIn`|Checks if the object is not in the collection.|`columnValue("City", notIn("New York", "Washington"))`
+|======
+
+=== Comparison Operators
+
+The following operators are supported in criterion queries:
+
+
+[cols="15%,60%,25%",opts="header"]
+|======
+|Operator|Description|Example
+|`not`|Negates the condition.|`not(columnValue("City", equalTo("New York")))`
+|`and`|Used to evaluate multiple conditions at the same time.|`and(columnValue("City", equalTo("New York")), columnValue("Salary", greaterThan(10000)))`
+|`or`|Used to evaluate for at least one matching condition.|`or(columnValue("City", equalTo("New York")), columnValue("Salary", greaterThan(10000)))`
+|======
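+
+Expressions and operators can be nested to build more complex conditions. Below is a sketch that combines the operators from the tables above, reusing the `kvView()` helper from the earlier examples:
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+// Records outside New York, or New York records with a salary of at most 10000.
+try (Cursor<Entry<Tuple, Tuple>> cursor = kvView().query(
+        null,
+        or(
+            not(columnValue("City", equalTo("New York"))),
+            and(columnValue("City", equalTo("New York")), columnValue("Salary", lessThanOrEqualTo(10000)))
+        )
+)) {
+    // ...
+}
+----
+--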
\ No newline at end of file
diff --git a/docs/_docs/developers-guide/transactions.adoc b/docs/_docs/developers-guide/transactions.adoc
new file mode 100644
index 0000000..b2c4a06
--- /dev/null
+++ b/docs/_docs/developers-guide/transactions.adoc
@@ -0,0 +1,87 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Transactions
+
+All table operations in Ignite 3 are transactional. You can provide an explicit transaction as the first argument of any Table or SQL API call. If you do not provide an explicit transaction, an implicit one will be created for every call.
+
+Here is how you can provide a transaction explicitly; an example of committing a transaction follows the code block:
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+KeyValueView<Long, Account> accounts =
+  table.keyValueView(Mapper.of(Long.class), Mapper.of(Account.class));
+
+accounts.put(null, 42L, new Account(16_000));
+
+var tx = client.transactions().begin();
+
+Account account = accounts.get(tx, 42L);
+account.balance += 500;
+accounts.put(tx, 42L, account);
+
+assert accounts.get(tx, 42L).balance == 16_500;
+
+tx.rollback();
+
+assert accounts.get(null, 42L).balance == 16_000;
+----
+
+tab:.NET[]
+[source, csharp]
+----
+var accounts = table.GetKeyValueView<long, Account>();
+await accounts.PutAsync(transaction: null, 42, new Account(16_000));
+
+await using ITransaction tx = await client.Transactions.BeginAsync();
+
+(Account account, bool hasValue) = await accounts.GetAsync(tx, 42);
+account = account with { Balance = account.Balance + 500 };
+
+await accounts.PutAsync(tx, 42, account);
+
+Debug.Assert((await accounts.GetAsync(tx, 42)).Value.Balance == 16_500);
+
+await tx.RollbackAsync();
+
+Debug.Assert((await accounts.GetAsync(null, 42)).Value.Balance == 16_000);
+
+public record Account(decimal Balance);
+----
+
+tab:C++[]
+[source, cpp]
+----
+auto accounts = table.get_key_value_view<account, account>();
+
+account init_value(42, 16'000);
+accounts.put(nullptr, {42}, init_value);
+
+auto tx = client.get_transactions().begin();
+
+std::optional<account> res_account = accounts.get(&tx, {42});
+res_account->balance += 500;
+accounts.put(&tx, {42}, *res_account);
+
+assert(accounts.get(&tx, {42})->balance == 16'500);
+
+tx.rollback();
+
+assert(accounts.get(nullptr, {42})->balance == 16'000);
+----
+
+--
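+
+To make changes permanent, commit the transaction instead of rolling it back. Below is a minimal Java sketch that reuses the `accounts` view from the example above:
+
+[tabs]
+--
+tab:Java[]
+[source, java]
+----
+var tx = client.transactions().begin();
+
+Account account = accounts.get(tx, 42L);
+account.balance += 500;
+accounts.put(tx, 42L, account);
+
+// Committing makes the update visible to transactions started afterwards.
+tx.commit();
+
+assert accounts.get(null, 42L).balance == 16_500;
+----
+--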
diff --git a/docs/_docs/general-tips.adoc b/docs/_docs/general-tips.adoc
index 058497f..58ac1c3 100644
--- a/docs/_docs/general-tips.adoc
+++ b/docs/_docs/general-tips.adoc
@@ -1,14 +1,103 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 = General Configuration Tips
 
+== Configuring Default Cluster Storage
+
+When a cluster is created, the default distribution zone is used for storage configuration. While we recommend creating distribution zones for your clusters, you can still use the default zone and configure it to suit your needs.
+
+To get the default storage configuration, use the `cluster config show zone` command. Here is an example of the default configuration:
+
+[source, json]
+----
+"zone" : {
+    "defaultDataStorage" : "aipersist",
+    "defaultDistributionZone" : {
+      "dataNodesAutoAdjust" : 2147483647,
+      "dataNodesAutoAdjustScaleDown" : 2147483647,
+      "dataNodesAutoAdjustScaleUp" : 0,
+      "dataStorage" : {
+        "dataRegion" : "default",
+        "name" : "aipersist"
+      },
+      "filter" : "$..*",
+      "partitions" : 25,
+      "replicas" : 1,
+      "zoneId" : 0
+    },
+    "distributionZones" : [ ],
+    "globalIdCounter" : 0
+  }
+----
+
+To change the type of storage used for new distribution zones, change the `zone.defaultDataStorage` value to `aimem` or `rocksDb`. You can also change the default data region used for new distribution zones by setting the `zone.defaultDistributionZone.dataStorage.dataRegion` parameter. You will need to restart the cluster after changing the data region parameters.
+
+You can also change these properties for link:sql-reference/distribution-zones[distribution zones] you have created for yourself.
+
+You can get information about the data region by using the `cluster config show aipersist` CLI command. Here is what the default data region configuration might look like:
+
+[source, json]
+----
+{
+  "checkpoint" : {
+    "checkpointDelayMillis" : 200,
+    "checkpointThreads" : 4,
+    "compactionThreads" : 4,
+    "frequency" : 180000,
+    "frequencyDeviation" : 40,
+    "logReadLockThresholdTimeout" : 0,
+    "readLockTimeout" : 10000,
+    "useAsyncFileIoFactory" : true
+  },
+  "defaultRegion" : {
+    "memoryAllocator" : {
+      "type" : "unsafe"
+    },
+    "replacementMode" : "CLOCK",
+    "size" : 268435456
+  },
+  "pageSize" : 16384,
+  "regions" : [ ]
+}
+----
+
+To change the size of the default region, use the `cluster config update` command:
+
+[source,shell]
+----
+cluster config update --url http://localhost:10300 "{aipersist.defaultRegion.size:9999999}"
+----
+
+== Configuring Local Paths
+
+By default, all files generated by Apache Ignite are stored in the installation folder. However, depending on your environment, you may need to store them elsewhere. You can use the `{IGNITE_HOME}/etc/vars.env` file to change the paths to the following:
+
+- Work directory, where data is stored.
+- Log folder, where logs are placed.
+- The folder from which libraries are loaded.
+- The configuration file that is used to set up the default node.
+
 == Configuring Client Logging
 
-By default, Apache Ignite 3 uses the `java.util.logging` (JUL) logging framework. Ignite uses the `etc/ignite.java.util.logging.properties` configuration, and outputs logs to the folder configured in the `LOG_DIR` variable that can be configured in the `etc/vars.env` file. By default, logs are stored in the `{IGNITE_HOME}/log` folder. You can provide a custom configuration file by using the `java.util.logging.config.file` property.
+By default, Ignite 3 uses the `java.util.logging` (JUL) logging framework. Ignite uses the `etc/ignite.java.util.logging.properties` configuration file and outputs logs to the folder set by the `LOG_DIR` variable, which can be configured in the `etc/vars.env` file. By default, logs are stored in the `{IGNITE_HOME}/log` folder. You can provide a custom configuration file by using the `java.util.logging.config.file` property.
 
-Some modules use libraries that rely on SLF4J logging. To gather logs from these libraries, add `org.slf4j:slf4j-jdk14:2.0.x` class to the classpath.
+Some Ignite modules use libraries that rely on SLF4J logging. To gather logs from these libraries, add the `org.slf4j:slf4j-jdk14:2.0.x` library to the classpath.
 
 For more information on configuring JUL logging, see the link:https://docs.oracle.com/en/java/javase/11/core/java-logging-overview.html[Java Logging Overview] in Oracle documentation.
 
-Ignite 3 also supports other logging frameworks if you need to customize the logger.
+Ignite also supports other logging frameworks if you need to customize the logger.
 
 === LOG4J 2
 
diff --git a/docs/_docs/glossary/glossary.adoc b/docs/_docs/glossary/glossary.adoc
index d3c5393..2802122 100644
--- a/docs/_docs/glossary/glossary.adoc
+++ b/docs/_docs/glossary/glossary.adoc
@@ -14,28 +14,63 @@
 // limitations under the License.
 = Glossary
 
-==== C
+== C
 
-Cluster Management Group::A subset of Ignite nodes in a Raft cluster. Cluster group leader is responsible for managing nodes that enter or leave Ignite Cluster.
+Cluster management group:: A subset of Ignite nodes in a Raft group. The cluster group leader is responsible for managing nodes that enter or leave an Ignite cluster.
 
-==== D
+Cluster node:: A cluster node is the base computational and data storage unit in Ignite.
 
-Data Region:: Data regions are used to control the amount of memory available to the storage. Depending on the type of storage the data region is assigned to, the data may be loaded into RAM or stored
+Columnar storage:: A mechanism that is optimized for quick access to columns of data. It can be used to drastically improve performance when reading a specific column's values from a large number of rows.
 
-Data Rebalance:: Data rebalance is the process of redistributing partitions to make sure they are distributed equally across all nodes in the cluster.
+Coordinator node:: The node that receives a distributed computing job, manages its execution, and reports the results to the client.
 
-==== M
+== D
 
-Metastore::  Metastore holds additional information about Apache Ignite cluster that is required for its operation, for example the number and type of data regions configured.
+Data region:: Used to control the amount of memory available for storage. Depending on the type of storage a data region is assigned to, the data may be loaded into RAM or stored on disk.
+
+Data rebalance:: The process of redistributing partitions equally across all nodes in a cluster.
+
+Distribution zone:: Controls how data is placed into partitions, and how partitions are distributed across the nodes of the cluster. Distribution zones are part of cluster configuration, and can be modified with link:sql-reference/distribution-zones[SQL commands].
+
+== L
+
+Logical topology:: A set of nodes connected into a Raft group is called a logical topology. These nodes follow the Raft leader and form an Ignite cluster.
+
+== M
+
+Metastore:: Metastore holds additional information about the Ignite cluster that is required for its operation, for example the number and type of data regions configured.
 
 
-==== P
+== P
 
 Persistent Storage:: Persistent storage is the type of memory storage that is preserved regardless of cluster state. Some portion of data will be loaded into RAM to improve performance.
 
+Physical topology:: When nodes are started, they find each other and form a cluster on a physical topology. All nodes on a physical topology can form an Ignite cluster, but are not necessarily part of it.
 
-==== V
+Primary Storage:: Primary storage is the database to which data is written and from which it is usually read.
 
-Volatile Storage:: Volatile storage is the type of memory storage that is only preserved while the cluster is active. Loss of power or unexpected cluster shutdown will lead to loss of data.
+== R
 
+RAFT:: Raft is a consensus algorithm used by Ignite to manage the cluster. It provides a high degree of stability and data consistency by using elections to guarantee that there is always only one cluster leader that has the authoritative log of all transactions performed on the cluster.
 
+RAFT log:: The Raft log is an append-only collection of all operations performed on the cluster. The leader's log is the sole authority in the cluster and overwrites any contradicting logs on follower nodes.
+
+Rebalance:: The process of relocating partitions between nodes to guarantee consistent data distribution after cluster topology changes.
+
+Replica storage:: Provides a dynamically expanded copy of the primary storage, which can be set up to use a different storage type for better performance.
+
+== S
+
+Snapshot:: A backup of data in an Ignite cluster. A snapshot taken on one cluster can be applied to another cluster.
+
+== U
+
+Update buffer:: Buffer that stores transactions to the primary storage before committing them to a replica storage. This reduces the number of transactions added to the latter.
+
+== V
+
+Volatile storage:: Memory storage that is only preserved while the cluster is active. Loss of power or unexpected cluster shutdown will lead to loss of data.
+
+== W
+
+Worker node:: The node performing the distributed computing job.
\ No newline at end of file
diff --git a/docs/_docs/handling-exceptions.adoc b/docs/_docs/handling-exceptions.adoc
deleted file mode 100644
index f3924a4..0000000
--- a/docs/_docs/handling-exceptions.adoc
+++ /dev/null
@@ -1,135 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-= Exceptions
-
-This section outlines basic exceptions that can be generated by Ignite 3 and provides basic instructions for handling them.
-
-== Finding Stack Trace Information
-
-When the exception happens, Apache Ignite 3 provides a UUID of the specific exception, but not a full stack trace. For a full stack trace, check cluster logs.
-
-== Common Exceptions
-
-[cols="20%,80%", width="100%"]
-|===
-|Exception	|Description
-|`IGN-CMN-1`|Operation was stopped because node is stopping.
-|`IGN-CMN-2`|Required component was not started.
-|`IGN-CMN-65535`|Internal error.
-|===
-
-== Table Exceptions
-
-[cols="20%,80%", width="100%"]
-|===
-|Exception	|Description
-|`IGN-TBL-1`|Table already exists.
-|`IGN-TBL-2`|Table not found.
-|`IGN-TBL-3`|Column already exists.
-|`IGN-TBL-4`|Column not found.
-|===
-
-== Client Exceptions
-
-[cols="20%,80%", width="100%"]
-|===
-|Exception	|Description
-|`IGN-CLIENT-1`|Connection to client failed.
-|`IGN-CLIENT-2`|An issue happened with connection protocol.
-|`IGN-CLIENT-3`|Incompatible protocol version.
-|`IGN-CLIENT-4`|Table not found by ID.
-|`IGN-CLIENT-5`|An error with authentication parameters.
-|`IGN-CLIENT-6`|An error occurred during server authorization.
-|`IGN-CLIENT-7`|An error occurred while reading client configuration.
-|===
-
-== SQL  Exceptions
-
-[cols="20%,80%", width="100%"]
-|===
-|Exception	|Description
-|`IGN-SQL-1`|Ignite tried to read a page after last one.
-|`IGN-SQL-2`|The specified either does not exist or is closed.
-|`IGN-SQL-3`|SQL query is incorrect.
-|`IGN-SQL-4`|Query returned no result set.
-|`IGN-SQL-5`|Table is missing primary key.
-|`IGN-SQL-6`|Multiple primary keys found in a table.
-|`IGN-SQL-7`|Failed to find schema.
-|`IGN-SQL-8`|Specified storage engine is not supported.
-|`IGN-SQL-9`|Cursor is already closed when another operation starts.
-|`IGN-SQL-10`|Some keys could not be inserted because they are duplicates.
-|`IGN-SQL-11`|Cannot delete a column that belongs to the primary key.
-|`IGN-SQL-12`|Too many grouping expressions.
-|`IGN-SQL-13`|Unsupported SQL operation.
-|`IGN-SQL-14`|Unsupported DDL operation.
-|`IGN-SQL-15`|Query validation error.
-|`IGN-SQL-16`|Specified table not found.
-|`IGN-SQL-17`|Specified table version not found.
-|`IGN-SQL-18`|Invalid table option specified.
-|`IGN-SQL-19`|Query mapping error.
-|`IGN-SQL-20`|DDL execution error.
-|`IGN-SQL-21`|DML result error.
-|`IGN-SQL-22`|Failed to map SQL data type to relational.
-|`IGN-SQL-23`|Failed to serialize relational expression.
-|`IGN-SQL-24`|Failed to deserialized relational expression.
-|`IGN-SQL-25`|Class not found.
-|`IGN-SQL-26`|Failed to compile an SQL expression.
-|`IGN-SQL-27`|Node left the cluster.
-|`IGN-SQL-28`|Failed to send a message.
-|`IGN-SQL-29`|Operation aborted or interrupted.
-|`IGN-SQL-30`|An error occurred while canceling the operation.
-|`IGN-SQL-31`|Session expired.
-|`IGN-SQL-32`|Session evaluation error.
-|`IGN-SQL-33`|Execution cancelled.
-|===
-
-== Meta Storage Exceptions
-
-[cols="20%,80%", width="100%"]
-|===
-|Exception	|Description
-|`IGN-META-1`|Failed to start the underlying key value storage.
-|`IGN-META-2`|Failed to restore the underlying key value storage.
-|`IGN-META-3`|Failed to close the underlying key value storage.
-|`IGN-META-4`|Failed to compact the underlying key value storage.
-|`IGN-META-5`|Failed to perform an operation on the underlying key value storage.
-|`IGN-META-6`|Failed to perform an operation within a specified time period. Usually in such cases the operation should be retried.
-|`IGN-META-7`|Failed to iterate over the underlying key value storage.
-|`IGN-META-8`|Failed to stop a watcher.
-|`IGN-META-9`|Failed to deploy or update a watcher.
-|`IGN-META-10`|Failed to iterate over meta storage cursor.
-|`IGN-META-11`|Failed to close a cursor.
-|===
-
-== Index Exceptions
-
-[cols="20%,80%", width="100%"]
-|===
-|Exception	|Description
-|`IGN-IDX-1`|Invalid index definition.
-|`IGN-IDX-2`|Failed to find the specified index.
-|===
-
-== Transactions Exceptions
-
-[cols="20%,80%", width="100%"]
-|===
-|Exception	|Description
-|`IGN-TX-1`|Failed to create a transaction state storage.
-|`IGN-TX-2`|Failed to destroy the transaction state storage.
-|`IGN-TX-3`|Failed to work with the transaction state storage.
-|`IGN-TX-4`|Transaction state storage is stopped when a different operation is planned.
-|===
\ No newline at end of file
diff --git a/docs/_docs/ignite-cli-tool.adoc b/docs/_docs/ignite-cli-tool.adoc
index 0d03962..e585e20 100644
--- a/docs/_docs/ignite-cli-tool.adoc
+++ b/docs/_docs/ignite-cli-tool.adoc
@@ -12,75 +12,129 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+= Ignite 3 CLI Tool
 
-= Ignite CLI Tool
+== Overview
 
-The Ignite CLI tool uses REST API as a communication protocol between the node and the CLI,
-allowing you to configure the node. By default, the CLI tool runs in the REPL (Read Eval Print Loop) mode, but you can also execute commands without entering it.
+The Ignite 3 CLI tool uses the REST API as a communication protocol between the node and the CLI, allowing you to configure the node. By default, the CLI tool runs in interactive mode, but you can also execute commands without entering it.
 
-Below is a list of Ignite CLI tool commands with short descriptions:
+When using the Ignite 3 CLI tool, put arguments that contain special characters in quotation marks, and do not include line breaks in the string. For example:
 
-== Installation and Usage
+[source, bash]
+----
+cluster config update "security.authentication.providers.basic4={type=basic,username=name,password=pass}"
+----
 
-You can find detailed installation guide in the link:quick-start/getting-started-guide[Getting Started] tutorial. The tutorial also includes some basic usage examples.
+Otherwise, special POSIX characters in strings (`{` and `}` in the command above) will be interpreted by the shell, leading to potentially unpredictable results.
 
-== Command Options
+Alternatively, you can use the backslash (`\`) to escape all special characters in your command. For example:
 
-The following commands can be executed in REPL mode of the command line tool:
+[source, bash]
+----
+cluster config update security.authentication.providers.basic4=\{type=basic,username=name,password=pass\}
+----
+
+== Commands and Options
+
+The following commands can be executed in the interactive CLI mode.
+
+=== CLI Commands
+
+These commands help you configure your CLI tool and cluster.
 
 [cols="1,1,3",opts="header", stripes=none]
 |===
 | Command| Parameters | Description
-//| `bootsrap` || Installs Ignite core modules locally. Previously `init`.
-| `clear` || Clears the terminal.
 | `cli config` || Gets all CLI tool configuration values.
-| `cli config get` | `<config key-value>` | Gets the value of the specified CLI tool configuration.
-| `cli config set` | `<config key-value>` | Sets the value of the specified CLI tool configuration.
-| `cli config show` | `<config key-value>` | Shows the cluster configuration.
-| `cluster init` | `[--cluster-url] <--meta-storage-node> [--cmg-node]`| Initializes the cluster on the specified URL.
-| `cluster status` | `[--cluster-url]` | Displays the detailed status of the specified cluster.
-| `cluster config show` | `[--cluster-url] [--selector]` | Gets the configuration of the specified cluster.
-| `cluster config update` | `[--cluster-url] <config in HOCON format>` | Updates the configuration of the cluster with the provided configuration in HOCON format.
-| `cluster topology physical` | `[--cluster-url]` | Shows physical cluster topology.
-| `cluster topology logical` | `[--cluster-url]` | Shows logical cluster topology.
-| `connect` | [--cluster-url] | Connects to the specified cluster, or `localhost:3000` if no cluster url is specified. All subsequent commands use the specified cluster url.
-| `disconnect` || Disconnects from the cluster.
-| `exit` || Stops current interactive session.
-| `help` | `<command>` | Provides information on available commands, or for specific command if the `command` parameter is specified.
-| `node classpath` || Shows the current classpath used by Ignite nodes.
-| `node config show` | `[--node-url][--selector]` | Gets the local node configuration.
-|`node config update` | `[--node-url] <config in HOCON format>` | Updates the local node configuration with the provided configuration in HOCON format.
-//| `node list` || Shows the list of currently running local Ignite nodes.
-//| `node start` | `[--config] <nodeName>` | Starts an Ignite node locally.
-//| `node stop` | `<nodeName>` | Stops a locally running Ignite node.
-| `node status` | `--node-url` |Shows the status of the default cluster, or a different one if cluster-url is provided.
-| `sql` | `[--cluster-url][--script-file] <query>` | Executes the specified SQL query.
-| `version` || Displays current CLI tool version.
+| `cli config profile create` | `[--copy-from] <profileName>` | Creates a profile with the given name.
+| `cli config profile activate` | `<profileName>` | Activates the profile identified by name.
+| `cli config profile list` | | Lists configuration profiles.
+| `cli config profile show` | | Gets the current profile details.
+| `cli config get` | `<key>` | Gets the value for the specified configuration key.
+| `cli config set` | `<key=value>` | Sets configuration parameters using comma-separated input key-value pair(s).
+| `cli config show` | `[--cluster-endpoint-url] [--selector]` | Shows the cluster configuration.
+| `cli config remove` | `<key>` | Removes a configuration key.
 |===
 
-== Non-interactive Mode
+=== Cluster Commands
 
-The following commands can be executed in non-REPL mode of the command-line tool:
+These commands let you manage your cluster.
 
 [cols="1,1,3",opts="header", stripes=none]
 |===
 | Command| Parameters | Description
-| `ignite3 cli config` || Gets all CLI tool configuration values.
-| `ignite3 cli config get` | `<config key-value>` | Gets the value of the specified CLI tool configuration.
-| `ignite3 cli config set` | `<config key-value>` | Sets the value of the specified CLI tool configuration.
-//| `ignite3 bootsrap` || Installs Ignite core modules locally. Previously `init`.
-| `ignite3 cluster config show` | `[--cluster-url] [--selector]` | Gets the configuration of the specified cluster.
-| `ignite3 cluster config update` | `[--cluster-url] <config in HOCON format>` | Sets the configuration of the cluster with the provided configuration in HOCON format.
-| `ignite3 cluster init` | `[--cluster-url] <--meta-storage-node> [--cmg-node]`| Initializes the cluster on the specified URL.
-| `ignite3 cluster status` | `[--cluster-url]` | Displays the detailed status of the specified cluster.
-| `ignite3 help` | `<command>` | Provides information on available commands, or for specific command if the `command` parameter is specified.
-//| `ignite3 node classpath` || Shows the current classpath used by Ignite nodes.
-| `ignite3 node config show` | `[--node-url][--selector]` | Gets the local node configuration.
-//| `ignite3 node list` || Shows the list of currently running local Ignite nodes.
-//| `ignite3 node start` | `[--config] <nodeName>` | Starts an Ignite node locally.
-//| `ignite3 node stop` | `<nodeName>` | Stops a locally running Ignite node.
-|`ignite3 node config update` | `[--node-url] <config in HOCON format>` | Updates the local node configuration with the provided configuration in HOCON format.
-| `ignite3 node status` | `[--node-url]` |Shows the status of the default node, or a different one if node-url is provided.
-| `ignite3 sql` | `[--cluster-url][--script-file] <query>` | Executes the specified SQL query.
-| `ignite3 version` || Displays current CLI tool version.
+| `cluster init` | `[--cmg-node] [--meta-storage-node] <clusterName> or <--cluster-endpoint-url> <cluster-config> or <--cluster-config-file>`| Initializes a cluster.
+| `cluster status` | `[--cluster-endpoint-url]` | Displays the detailed status of the specified cluster.
+| `cluster topology physical` | `[--plain] [--cluster-endpoint-url]` | Shows physical topology of the specified cluster.
+| `cluster topology logical` | `[--plain] [--cluster-endpoint-url]` | Shows logical topology of the specified cluster.
+| `cluster unit deploy` | `[clusterUrl] [--nodes] <path> <version> <id>` | Deploys a unit for the cluster.
+| `cluster unit undeploy` | `[clusterUrl] <version> <id>` | Undeploys a cluster's unit.
+| `cluster unit list` | `[--plain] [clusterUrl] [--status] [<unitId>[--version]]` | Lists cluster's deployed units.
+| `connect` | `[--username] [--password] [--node-url] [--node-name]` | Connects to the specified cluster, or to `localhost:3000` if no cluster is specified. All subsequent commands use the specified cluster URL.
+| `disconnect` || Disconnects from the current cluster.
+|===
+
+=== Snapshot Commands
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+| Command| Parameters | Description
+|`cluster snapshot create`|`[--tables <list_of_tables> \| --all]`| Creates a new link:snapshots/snapshots-and-recovery[snapshot], optionally for the specified tables only.
+|`cluster snapshot restore`| `<snapshot_id> [--tables <list_of_tables> \| --all]` | Restores data from the specified snapshot, optionally for the specified tables only.
+|`cluster snapshot status`|`[<snapshot_id> [--all-nodes] [--tables <list_of_tables> \| --all]]`| Provides information about the current status of the specified snapshot, optionally for the specified tables only. If no snapshot ID is specified, provides information about the status of all snapshots.
+|===
+
+=== Node Commands
+
+These commands address specific node(s) in a cluster.
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+| Command| Parameters | Description
+| `node config show` | `[--node-url]/[--node-name] [--selector]` | Gets the specified node configuration.
+|`node config update` | `[--node-url]/[--node-name] [<args>...]` | Updates the local node configuration with the arguments in the HOCON format.
+| `node status` | `[--node-url]/[--node-name]` |Shows the status of the default node, or a different one if specified.
+| `node version` | `[--node-url]/[--node-name]` |Shows the node's build version.
+| `node metric list` | `[--node-url]/[--node-name][--plain]` |Lists node's metrics.
+| `node metric source enable` | `[--node-url]/[--node-name]<srcName>` |Enables a metric source for the node.
+| `node metric source disable` | `[--node-url]/[--node-name]<srcName>` |Disables a metric source for the node.
+| `node metric source list` | `[--node-url]/[--node-name][--plain]` |Lists node metrics' sources.
+| `node unit list` | `[--plain] [--status] [<unitId> [--version]] [--node-url]/[--node-name]` |Lists the node's deployment units.
+|===
+
+=== User and Role Commands
+
+These commands help you manage access to the system: users and roles.
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+| Command| Parameters | Description
+|`role create`|`[--cluster-endpoint-url] <roleName>`| Creates a new role. New roles do not have any privileges.
+|`role delete`|`[--cluster-endpoint-url] <roleName>`| Deletes the specified role. A role with assigned privileges cannot be deleted.
+|`role list`|`[--plain] [--cluster-endpoint-url] [--user]`| Lists roles for the specified cluster, optionally filtered by user(s).
+|`role show`|`[--with-privileges] [--with-users] [--cluster-endpoint-url] <roleName>`| Shows the role information, optionally including privileges assigned to the role and/or the users who have that role.
+|`role privilege grant`|`[--on] [--cluster-endpoint-url] [--action] --to`| Grants a privilege (permission to perform the specified action on the specified object) to the specified role. For more information, see link:security/permissions[User Permissions and Roles].
+|`role privilege revoke`|`[--action] [--from] [--cluster-endpoint-url] --to`|Revokes privilege(s) (permission(s) to perform the specified action on the specified object) from the role. For more information, see link:security/permissions[User Permissions and Roles].
+| `user create` |`[--password] [cluster-endpoint-url] <username>` | Creates a new user.
+| `user delete` |`[--cluster-endpoint-url] [--with-revoke] <username>`| Deletes the user.
+| `user edit` |`[--password] [--cluster-endpoint-url] <username>` |  Edits user configuration.
+| `user list` |`[--plain] [--cluster-endpoint-url] [--role]`| Provides a list of users on the server, optionally filtered by role(s).
+| `user show` |`[--with-roles] [--cluster-endpoint-url] <username>`| Provides extended information about the specific user, optionally with information about roles assigned to the user.
+| `user role assign` | `[--profile] [--cluster-endpoint-url] [--role] [--to]`| Assigns role(s) to the specified user.
+| `user role revoke` | `[--profile] [--cluster-endpoint-url] [--from] [--role]`| Revokes role(s) from the specified user.
+|===
+
+=== Miscellaneous Commands
+
+These are general-purpose commands.
+
+[cols="1,1,3",opts="header", stripes=none]
+|===
+| Command| Parameters | Description
+| `cls` || Clears the terminal.
+| `exit` || Stops the current interactive session.
+| `help` | `<command or command group>` | Provides information on available command groups, commands in the specified group, or for the specified command.
+| `sql` | `[--plain] [jdbc-url] [--script-file] <command>`| Executes the specified SQL query (command), or the queries included in the specified file, on the specified cluster.
+|`token revoke`|`[--cluster-endpoint-url] [--profile] [--token\|--username]`| Revokes the link:security/jwt[JWT token]. Can revoke a specific token, or all tokens from the user.
+| `version` || Displays the current CLI tool version.
 |===
\ No newline at end of file
diff --git a/docs/_docs/images/data_streaming.png b/docs/_docs/images/data_streaming.png
new file mode 100644
index 0000000..27a0120
--- /dev/null
+++ b/docs/_docs/images/data_streaming.png
Binary files differ
diff --git a/docs/_docs/images/jmc-metrics.png b/docs/_docs/images/jmc-metrics.png
new file mode 100644
index 0000000..be0b0fe
--- /dev/null
+++ b/docs/_docs/images/jmc-metrics.png
Binary files differ
diff --git a/docs/_docs/images/rbac.png b/docs/_docs/images/rbac.png
new file mode 100644
index 0000000..1feb0b1
--- /dev/null
+++ b/docs/_docs/images/rbac.png
Binary files differ
diff --git a/docs/_docs/includes/ignite-cli-download-windows.adoc b/docs/_docs/includes/ignite-cli-download-windows.adoc
index a62d911..1625c39 100644
--- a/docs/_docs/includes/ignite-cli-download-windows.adoc
+++ b/docs/_docs/includes/ignite-cli-download-windows.adoc
@@ -14,5 +14,5 @@
 // limitations under the License.
 
 // tag::command[]
-curl "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=ignite/3.0.0-beta1/ignite3-3.0.0-beta1.zip" -o ignite3-3.0.0-beta1.zip
+curl "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=ignite/3.0.0-beta2/ignite3-3.0.0-beta1.zip" -o ignite3-3.0.0-beta1.zip
 // end::command[]
diff --git a/docs/_docs/index.adoc b/docs/_docs/index.adoc
index b658912..bcd0562 100644
--- a/docs/_docs/index.adoc
+++ b/docs/_docs/index.adoc
@@ -12,51 +12,34 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-= Apache Apache Ignite 3
+= Apache Ignite 3 Beta 2
 
 Apache Ignite 3 is a distributed database for high-performance computing with in-memory speed.
 
-Apache Ignite 3 brings a number of improvements compared to Apache Ignite 2:
+Apache Ignite 3 brings a number of improvements compared to Ignite 2:
 
-* **Industry-standard libraries**: Apache Ignite 3 switches from custom libraries used in Apache Ignite 2 towards industry-standard libraries, bringing the latest developments from all over the world to your environment.
+* **Industry-standard libraries**: Ignite 3 switches from custom libraries used in Ignite 2 towards industry-standard libraries, bringing the latest developments from all over the world to your environment.
 - SWIM replaces Ring and Discovery protocols, bringing even better stability and performance on large topologies.
 - Raft replaces Ignite replication, providing an improved consensus algorithm, out of the box split brain protection and much more.
-- SQL Calcite replaces H2 engine used in Apache Ignite 2.
-* **Pluggable storages**: With Apache Ignite 3, you can choose the best storage for your needs. With this beta release you can choose between page memory and RocksDB, and more options may be added later.
-* **New transaction protocol**: Apache Ignite 3 provides a new custom protocol for handling transactions, built up on previous experience with Apache Ignite 2. It brings multiple features such as read-only transactions, SQL transactions and much more.
-* **Dynamic configuration**: HOCON configuration replaces XML-based configuration of Apache Ignite 2. All configuration properties can now be changed at runtime as well, by using API or command-line tool.
-* **Ignite CLI tool**: The new Apache Ignite tool that functions as a single sign-on for any operational, management, and development needs.
+- SQL Calcite replaces H2 engine used in Ignite 2.
+* **Pluggable storages**: With Ignite 3, you can choose the best storage for your needs. With this release, you can choose between page memory and RocksDB, and more options may be added later.
+* **New transaction protocol**: Ignite 3 provides a new custom protocol for handling transactions, built up on previous experience with Ignite 2. It brings multiple features such as read-only transactions, SQL transactions and much more.
+* **Dynamic configuration**: HOCON configuration replaces XML-based configuration of Ignite 2. All configuration properties can now be changed at runtime as well, by using API or command-line tool.
+* **Improved CLI tool**: The new Ignite tool that functions as a single entry point for any operational, management, and development needs.
 * See a link:https://cwiki.apache.org/confluence/display/IGNITE/Proposals+for+Ignite+3.0[full list of improvements,window=_blank].
-
-== Known Beta 1 Limitations
-
-Some features are not fully implemented in beta. These features are expected to be working in full release:
-
-- Scaling the cluster is not fully implemented. You can add tables to the topology or remove them from it, but not scale up an existing table.
-- Operations performed on unstable topology are not guaranteed to be reliable.
-- Restarting the running cluster is not fully implemented - restart under load is not reliable, additionally the restart must be performed in correct order - first stopping partition nodes, then metastore, and finally cluster management group, and bringing them up in reverse order.
-- Database vacuuming is not implemented.
-
-== New in Beta 1
-
-Apache Ignite 3 Beta 1 release includes the following features:
 
 
-- RPM and DEB packages: simplified installation and node management
-with system services.
-- SQL Transactions.
-- Transactional Protocol: improved locking model, multi-version based
-lock-free read-only transactions.
-- Storage: A number of improvements to memory-only and on-disk engines
-based on Page Memory.
-- Indexes: Basic functionality, hash and sorted indexes.
-- Client logging: A LoggerFactory may be provided during client
-creation to specify a custom logger for logs generated by the client.
-- Metrics framework: Collection and export of cluster metrics.
-- Client's Partition Awareness: Clients are now aware of data
-distribution over the cluster nodes which helps avoid additional
-network transmissions and lowers operations latency.
-- C++ client:  Basic C++ client, able to perform operations on data.
-- Autogenerated values: now a function can be specified as a default
-value generator during a table creation. Currently only
-gen_random_uuid is supported.
+== Raft Consensus Algorithm
+
+Apache Ignite 3 uses the Raft consensus algorithm, which brings a number of consistency improvements:
+
+=== Split Brain Protection
+
+Raft comes with a built-in election algorithm that provides protection from split-brain issues. When working with data, Raft selects a leader and always trusts the elected leader. If an issue such as a network partition occurs, the leader of the larger partition assumes the role of the overall leader until the database is back in order, after which the other nodes have their logs updated to match the leader's. Commits made in the smaller partition are rolled back.
+
+This way, Ignite 3 can recover from even the most dangerous cases of split-brain automatically.
+
+=== Fast Scalability
+
+In modern environments, it is often optimal to dynamically scale the cluster size to meet user demand. With Raft, when a new node is added to the cluster, it automatically triggers the rebalancing procedure that moves data to the new node without requiring other nodes to redistribute the load between themselves.
diff --git a/docs/_docs/installation/deb-rpm.adoc b/docs/_docs/installation/deb-rpm.adoc
index 6c161a4..620965b 100644
--- a/docs/_docs/installation/deb-rpm.adoc
+++ b/docs/_docs/installation/deb-rpm.adoc
@@ -14,7 +14,7 @@
 // limitations under the License.
 = Installing Using DEB and RPM Package
 
-Apache Ignite can be installed from the official link:https://www.apache.org/dist/ignite/rpm[RPM] or link:https://www.apache.org/dist/ignite/deb[DEB] repositories.
+Ignite can be installed from the official link:https://www.apache.org/dist/ignite/rpm[RPM] or link:https://www.apache.org/dist/ignite/deb[DEB] repositories.
 
 == Installing Deb Package
 
@@ -37,46 +37,27 @@
 //sudo apt install apache-ignite --no-install-recommends
 //----
 
-. Download the archive with the Ignite CLI tool:
-+
-[tabs]
---
-tab:deb[]
-[source,shell]
-----
-curl -L "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=ignite/3.0.0-beta1/ignite3-cli_3.0.0~beta1_all.deb" -o ignite3-cli_3.0.0-beta1_all.deb
-
-curl -L "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=ignite/3.0.0-beta1/ignite3-db_3.0.0~beta1_all.deb" -o ignite3-db_3.0.0-beta1_all.deb
-----
-
-tab:RPM[]
-[source,shell]
-----
-curl -L "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=ignite/3.0.0-beta1/ignite3-cli-3.0.0~beta1.noarch.rpm" -o ignite3-cli-3.0.0~beta1.noarch.rpm
-
-curl -L "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=ignite/3.0.0-beta1/ignite3-db-3.0.0~beta1.noarch.rpm" -o ignite3-db-3.0.0~beta1.noarch.rpm
-----
---
-
-
-
-Install the Apache Ignite 3 package:
+Install the Ignite 3 package:
 
 [tabs]
 --
 tab:deb[]
 [source, shell]
 ----
-sudo apt install ignite3-db_3.0.0-beta1_all.deb --no-install-recommends
-sudo apt install ignite3-cli_3.0.0-beta1_all.deb --no-install-recommends
+sudo apt install ignite-db-3.0.0-beta2.deb --no-install-recommends
+sudo apt install ignite-cli-3.0.0-beta2.deb --no-install-recommends
 ----
 
 tab:RPM[]
 [source,shell]
 ----
-sudo rpm -i ignite3-db-3.0.0~beta1.noarch.rpm
+sudo rpm -i ignite-db-3.0.0-beta2.noarch.rpm
 
-sudo rpm -i ignite3-cli-3.0.0~beta1.noarch.rpm
+sudo rpm -i ignite-cli-3.0.0-beta2.noarch.rpm
 ----
 
---
\ No newline at end of file
+--
+
+== Next Steps
+
+With Ignite installed, you can proceed to the link:quick-start/getting-started-guide[Getting Started] guide or link:developers-guide/table-api[use the available APIs] immediately.
\ No newline at end of file
diff --git a/docs/_docs/installation/installing-using-docker.adoc b/docs/_docs/installation/installing-using-docker.adoc
index 759b3be..8b2932d 100644
--- a/docs/_docs/installation/installing-using-docker.adoc
+++ b/docs/_docs/installation/installing-using-docker.adoc
@@ -18,13 +18,13 @@
 
 Ignite was tested on:
 
-include::../includes/prereqs.adoc[]
+include::includes/prereqs.adoc[]
 
 == Building Docker Image
 
-Apache Ignite 3 Beta is not currently available on DockerHub. To use a Docker image, you need to build it locally:
+Ignite 3 is not currently available on DockerHub. To use a Docker image, you need to build it locally:
 
-- Check out the Apache Ignite 3 link:https://github.com/apache/ignite-3[repository].
+- Check out the Ignite 3 link:https://github.com/apache/ignite-3[repository].
 - Run the Gradle command to build a Docker image:
 ----
 ./gradlew docker
diff --git a/docs/_docs/installation/installing-using-exe.adoc b/docs/_docs/installation/installing-using-exe.adoc
index e4368df..3e908a9 100644
--- a/docs/_docs/installation/installing-using-exe.adoc
+++ b/docs/_docs/installation/installing-using-exe.adoc
@@ -18,19 +18,14 @@
 
 Ignite was tested on:
 
-include::../includes/prereqs.adoc[]
+include::includes/prereqs.adoc[]
 
 
 == Installing Using Distributive
 
-WARNING:
-----
-If you have installed one of the Ignite 3 Alpha releases before, please remove the `~/.ignitecfg` file before proceeding. This is a temporary step, the procedure will be improved in the future releases.
-----
+To install Ignite, perform the following steps:
 
-To install Apache Ignite, perform the following steps:
-
-. Navigate to link:https://ignite.apache.org/download.cgi[Apache Ignite Downloads,window=_blank] page to download the distributive.
+. Navigate to the link:https://ignite.apache.org/download.cgi[Ignite Downloads,window=_blank] page to download the distribution.
 
 . Put the distributed file into the installation folder in your system.
 
diff --git a/docs/_docs/installation/installing-using-zip.adoc b/docs/_docs/installation/installing-using-zip.adoc
index 63d7c71..999a2e9 100644
--- a/docs/_docs/installation/installing-using-zip.adoc
+++ b/docs/_docs/installation/installing-using-zip.adoc
@@ -18,35 +18,11 @@
 
 Ignite was tested on:
 
-include::../includes/prereqs.adoc[]
+include::includes/prereqs.adoc[]
 
 
 == Installing Using ZIP Archive
 
-
-. Download the archive with the Ignite CLI tool:
-+
-[tabs]
---
-tab:Unix[]
-[source,shell]
-----
-curl -L "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=ignite/3.0.0-beta1/ignite3-3.0.0-beta1.zip" -o ignite3-3.0.0-beta1.zip
-----
-
-tab:Windows (PowerShell)[]
-[source,shell]
-----
-include::../includes/ignite-cli-download-windows.adoc[tag=command,indent=0]
-----
-
-tab:Windows (CMD)[]
-[source,shell]
-----
-include::../includes/ignite-cli-download-windows.adoc[tag=command,indent=0]
-----
---
-
 . Unpack the archive:
 +
 [tabs]
@@ -54,47 +30,37 @@
 tab:Unix[]
 [source,shell]
 ----
-unzip ignite3-3.0.0-beta1.zip && cd ignite3-3.0.0-beta1
+unzip ignite-3.0.0-beta2.zip && cd ignite-db-3.0.0-beta2
 ----
 
 
 tab:Windows (PowerShell)[]
 [source,shell]
 ----
-Expand-Archive ignite3-3.0.0-beta1.zip -DestinationPath . ; cd ignite3-3.0.0-beta1
+Expand-Archive ignite-3.0.0-beta2.zip -DestinationPath . ; cd ignite-db-3.0.0-beta2
 ----
 
 tab:Windows (CMD)[]
 [source,shell]
 ----
-tar -xf ignite3-3.0.0-beta1.zip & cd ignite3-3.0.0-beta1
+tar -xf ignite-3.0.0-beta2.zip & cd ignite-db-3.0.0-beta2
 ----
 --
 
-. Add your installation directory to the PATH environment variable:
+. Set the `IGNITE_HOME` environment variable to the database directory:
 +
 [tabs]
 --
 tab:Unix[]
 [source,shell]
 ----
+cd ignite-db-3.0.0-beta2
 export IGNITE_HOME=$(pwd)
 ----
-
-//tab:Windows (PowerShell)[]
-//[source,shell]
-//----
-//$ENV:PATH += ";."
-//----
-
-//tab:Windows (CMD)[]
-//[source,text]
-//----
-//set PATH=%cd%;%PATH%
-//----
 --
 
-//NOTE: On Windows, this latter change is not permanent and only affects the current terminal session. If you want the
-//`ignite` command to be available after terminal or system restart, use the `System Properties` dialog to
-//permanently update the `PATH` environment variable by adding a full path to the Ignite installation directory.
+On Windows, create the `IGNITE_HOME` environment variable with the path to the folder.
 
+== Next Steps
+
+With Ignite installed, you can proceed to the link:quick-start/getting-started-guide[Getting Started] guide or link:developers-guide/table-api[use the available APIs] immediately.
\ No newline at end of file
diff --git a/docs/_docs/limitations.adoc b/docs/_docs/limitations.adoc
new file mode 100644
index 0000000..39b0b6e
--- /dev/null
+++ b/docs/_docs/limitations.adoc
@@ -0,0 +1,100 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Known Limitations in Apache Ignite 3 Beta
+
+== Inconsistent RO Operations Across Clients
+
+Read-only transactions may miss data that was written within the last several seconds by a different client. Read-write transactions will always guarantee consistency.
+
+This behavior is intentional and is designed to speed up read-only operations. Ignite only guarantees consistency within the same client for read-only transactions.
+
+Depending on the API you use, read operations may be performed in read-only transactions automatically. For example, `SELECT` operations will often be read-only. If you need to perform an operation with guaranteed consistency, use an explicit read-write transaction.
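+
+For example, the following sketch runs a read inside an explicit read-write transaction. It assumes an `Ignite` API instance named `ignite`, the `Person` table used elsewhere in this documentation, and that `Session.execute` accepts the transaction as its first argument; exact APIs may differ slightly between beta builds.
+
+[source, java]
+----
+// Begin an explicit read-write transaction so the read observes
+// all data committed by other clients.
+Transaction tx = ignite.transactions().begin();
+
+try {
+    try (Session session = ignite.sql().createSession();
+         ResultSet rs = session.execute(tx, "SELECT * FROM Person WHERE id = 1")) {
+        // Work with the result set here as part of the read-write transaction.
+    }
+    tx.commit();
+} catch (Exception e) {
+    tx.rollback();
+    throw new RuntimeException(e);
+}
+----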
+
+== No Schema Creation
+
+In Ignite 3 Beta, only the `PUBLIC` schema is supported. You cannot create other schemas. You can still edit the `PUBLIC` schema as required. Creating additional schemas will be supported in the initial release.
+
+== No Meta Storage Compaction
+
+Meta storage compaction is not implemented in this release. This causes the meta storage to grow indefinitely. Clusters that run for a long time may require a large amount of storage space.
+
+== SQL Performance
+
+This release does not include SQL performance optimizations. Performance will be improved in later versions.
+
+== Storage Limitations
+
+=== Limited Storage Types
+
+In this beta release, you can only use one storage type in your cluster.
+
+Only `aipersist` and `rocksdb` storage types are currently supported. RocksDB storage is not yet optimized for performance and is not recommended for high-load environments.
+
+
+=== Single Distribution Zone
+
+In the beta release, only one distribution zone is supported. The full release will support multiple distribution zones.
+
+=== No Data Colocation
+
+Data is not colocated automatically for queries. This may have a negative effect on performance if the required data is not already on the same node.
+
+//== CMG Idempotency is Broken
+//TODO: Check https://ggsystems.atlassian.net/browse/IGN-23898 before release
+
+
+== Exceptions During Node Startup and Shutdown
+
+Currently, the node startup and shutdown processes are not optimized, and exceptions may happen because multiple Ignite modules start and stop at the same time. This should have no effect on cluster operation.
+
+== Datacenter Replication
+
+Tables cannot be removed while replication is in progress. Moreover, schema update operations are not possible until replication is finished (the operation will be frozen). You need to stop replication before truncating tables.
+Replication will fail if the source or target table schema is not synced. The schema should be synced manually, after which the affected replications need to be restarted.
+In case of a rolling restart or upgrade, all replications should be stopped or paused before the nodes are restarted.
+
+
+== Deployment Units Limitations
+
+This release has the following limitations related to deployment units:
+
+- Huge deployment units may cause out-of-memory errors.
+
+- If the node a unit was initially deployed to shuts down before the deployment unit is distributed to other cluster nodes, the deployment unit will be lost. To fix this, undeploy the unit manually and redeploy it.
+
+== Compute API
+
+This release has the following limitations related to distributed computing:
+
+- Compute API is not available in embedded mode.
+
+- Not all classes are supported as a Job result or Job argument.
+
+- The following data types are supported:
+
+* Boolean
+* Integer
+* Float
+* Double
+* Decimal
+* UUID
+* Instant
+* Date
+
+== Data Import and Export
+
+Partitioned import for Parquet files misses the partitioned column.
+
+Schema inference is not supported for data import.
\ No newline at end of file
diff --git a/docs/_docs/quick-start/embedded-mode.adoc b/docs/_docs/quick-start/embedded-mode.adoc
new file mode 100644
index 0000000..5f2544c
--- /dev/null
+++ b/docs/_docs/quick-start/embedded-mode.adoc
@@ -0,0 +1,122 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Starting With Embedded Mode
+
+In most scenarios, you would use the Ignite CLI tool to start and manage your Ignite cluster. However, sometimes it is preferable to manage the cluster from a Java project. Starting and working with the cluster from code is called "embedded mode".
+
+This tutorial covers how you can start Ignite 3 from your Java project.
+
+NOTE: Unlike in Ignite 2, nodes in Ignite 3 are not separated into client and server nodes. Nodes started in embedded mode will be used to store data by default.
+
+== Prerequisites
+
+Ignite 3 was tested on:
+
+include::includes/prereqs.adoc[]
+
+== Add Ignite to Your Project
+
+//TODO: Replace this with real maven later
+
+First, you need to add Ignite to your project. The easiest way to do this is by using Maven:
+
+[source, xml]
+----
+
+<dependency>
+    <groupId>org.apache.ignite</groupId>
+    <artifactId>ignite-api</artifactId>
+    <version>3.0.0-beta2</version>
+    <scope>system</scope>
+    <systemPath>{path_to_jar}</systemPath>
+</dependency>
+
+<dependency>
+    <groupId>org.apache.ignite</groupId>
+    <artifactId>ignite-runner</artifactId>
+    <version>3.0.0-beta2</version>
+    <scope>system</scope>
+    <systemPath>{path_to_jar}</systemPath>
+</dependency>
+----
+
+== Prepare Ignite Configuration
+
+To start an Ignite node, you will need an Ignite configuration file that specifies all configuration properties of the node. For this tutorial, we recommend link:installation/installing-using-zip[installing] Ignite 3 and using the default configuration file it provides, located at `ignite-db-{version}/etc/ignite-config.conf`.
+
+== Start Ignite Server Nodes
+
+To start an Ignite node, use the following code snippet:
+
+[source, java]
+----
+CompletableFuture<Ignite> igniteFuture = IgnitionManager.start(
+    "node1",
+    "{config_path}",
+    "{workdir_path}"
+);
+----
+
+This code snippet starts an Ignite node with the name `node1` that uses the configuration from the file specified in the `{config_path}` parameter and stores data in the folder specified in the `{workdir_path}` parameter. The method returns a future that completes with an `Ignite` instance you can use to work with the node once the cluster has been initialized.
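+
+If you want to experiment with a multi-node cluster, you can start additional nodes with the same call. The sketch below is only an illustration: the node names, configuration paths, and work directories are placeholders, and every node needs its own configuration file with non-conflicting ports and its own work directory.
+
+[source, java]
+----
+// Start two more nodes; names, config files, and work directories must be unique per node.
+CompletableFuture<Ignite> node2Future = IgnitionManager.start(
+    "node2",
+    "{config_path_node2}",
+    "{workdir_path_node2}"
+);
+
+CompletableFuture<Ignite> node3Future = IgnitionManager.start(
+    "node3",
+    "{config_path_node3}",
+    "{workdir_path_node3}"
+);
+----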
+
+== Initialize the Cluster
+
+Started nodes discover each other by default, but they do not form an operational cluster until the cluster is initialized. You need to initialize the cluster to activate the nodes. If there are multiple nodes, once the cluster is activated, they form a topology and automatically distribute the workload between each other.
+
+Use the code snippet below to initialize the cluster:
+
+[source, java]
+----
+InitParameters initParameters = InitParameters.builder()
+    .destinationNodeName("node1")
+    .metaStorageNodeNames(List.of("node1"))
+    .clusterName("cluster")
+    .build();
+
+IgnitionManager.init(initParameters);
+----
+
+== Get an Ignite Instance
+
+Now that the cluster is started, you can get an instance of the `Ignite` class:
+
+[source, java]
+----
+Ignite ignite = igniteFuture.get();
+----
+
+This instance can be used to start working with the cluster. The future completes once the cluster is active.
+
+For example, here is how you can create a new table by using an SQL API:
+
+[source, java]
+----
+try (ResultSet rs = ignite.sql().createSession().execute(null,
+        "CREATE TABLE CREATE TABLE IF NOT EXISTS Person (id int primary key,city_id int,name varchar,age int,company varchar)")
+) {
+    // no-op
+}
+----
+
+NOTE: Session is closeable, but it is safe to skip the `close()` method for DDL and DML queries, as they do not keep a cursor open.
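+
+You can also work with the created table through the Table API. The snippet below is a minimal sketch that assumes the Tuple-based record view; depending on the build, the table name may need to be schema-qualified (for example, `PUBLIC.Person`).
+
+[source, java]
+----
+// Look up the table created above and insert a record through its record view.
+RecordView<Tuple> view = ignite.tables().table("Person").recordView();
+
+Tuple person = Tuple.create()
+    .set("id", 1)
+    .set("city_id", 1)
+    .set("name", "John Doe")
+    .set("age", 42)
+    .set("company", "Apache");
+
+// Passing null uses an implicit transaction for this single operation.
+view.upsert(null, person);
+----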
+
+More examples of working with Ignite can be found in the link:https://github.com/apache/ignite-3/tree/main/examples[examples] repository.
+
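+== Stop the Node
+
+When you are done, stop the embedded node from code. A minimal sketch, assuming the node name used when starting it:
+
+[source, java]
+----
+// Stop the node that was started earlier in this tutorial.
+IgnitionManager.stop("node1");
+----
+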
+== Next Steps
+
+From here, you may want to:
+
+* Check out the link:developers-guide/table-api[Developers guide] page for more information on available APIs
+* Try out our link:https://github.com/apache/ignite-3/tree/main/examples[examples]
\ No newline at end of file
diff --git a/docs/_docs/quick-start/getting-started-guide.adoc b/docs/_docs/quick-start/getting-started-guide.adoc
index 526c7d0..7218647 100644
--- a/docs/_docs/quick-start/getting-started-guide.adoc
+++ b/docs/_docs/quick-start/getting-started-guide.adoc
@@ -12,90 +12,123 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-= Getting Started Guide
+= Getting Started With Ignite 3
 
-This guide provides essential details on how to start working with Ignite 3.0 Beta by using the Ignite CLI tool, including the following information:
+This guide shows you how to start working with Ignite 3 using the Ignite CLI.
 
-* How to use the Ignite CLI tool to manage nodes.
-* How to run built-in examples.
-* Additional information regarding the use of the tool.
+//NOTE: This is called standalone mode.
 
 == Prerequisites
 
+Ignite 3 was tested on:
 
-Ignite 3.0 Beta was officially tested on:
+include::includes/prereqs.adoc[]
 
-include::../includes/prereqs.adoc[]
+== Install Ignite
 
-== Starting Ignite Node
+To start working with Ignite, you need to install it as described in the link:installation/installing-using-zip[Installation] section.
 
-In this guide we will use a link:../installation/installing-using-zip[zip] archive installation. To start an Ignite node, go to the `ignite3-db-3.0.0-beta1` folder and run the following command:
+NOTE: The Unzip operation creates two side-by-side directories: `ignite-db-3.0.0-beta2` and `ignite-cli-3.0.0-beta2`.
 
+== Start Ignite Node
 
+When starting to work with Ignite, you first need to start an Ignite node that will handle the API calls.
+
+. Navigate to the `ignite-db-3.0.0-beta2` directory.
+. Run the `start` command:
++
 [tabs]
 --
 tab:Linux[]
+[source, shell]
 ----
 bin/ignite3db start
 ----
 
 tab:Windows[]
-NOTE: You need to install Java in Bash environment to run Ignite on Windows.
+NOTE: You need to install Java in the Bash environment to run Ignite on Windows.
+[source, bash]
 ----
 bash bin\ignite3db start
 ----
 --
++
+Successful start produces the following output:
++
+----
+Starting Ignite 3...
+Node named defaultNode started successfully. REST addresses are [http://127.0.0.1:10300]
+----
 
+== Optional: Start Multiple Ignite Nodes in Docker
 
+Ignite 3 is designed to work in a cluster of 3 or more nodes. While a single node is sufficient for some scenarios, including this tutorial, a multi-node cluster is the most common use case.
 
-== Using Ignite CLI Tool
+To run multiple instances of Ignite, you would normally install it on multiple machines before starting a cluster. If you want to emulate an Ignite cluster for this tutorial, use the provided docker-compose file to start the nodes instead.
 
-Ignite CLI is a single entry point for all the Ignite operations. In this guide we will use it to start a cluster on a node you started on the previous step and execute a simple SQL query.
+== Optional: Set Custom JVM Options
 
-//* Manage existing nodes.
-//* Install new nodes.
-//* Update old nodes to new versions.
-//* Install or remove optional modules.
+You can set custom JVM options when starting the Ignite node by using the `IGNITE3_EXTRA_JVM_ARGS` parameter in the `${IGNITE_HOME}/etc/vars.env` file.
 
+You add the options to the `IGNITE3_EXTRA_JVM_ARGS` parameter using the `-XX` notation. For example:
+
+`IGNITE3_EXTRA_JVM_ARGS = -XX:+PrintFlagsFinal`
+
+For additional information, see link:https://opensource.com/article/22/4/jvm-parameters-java-developers[this article].
+
+== Activate the Ignite CLI
+
+You initialize your cluster, and perform operations on that cluster, using the link:ignite-cli-tool[Ignite CLI].
+
+To activate the Ignite CLI:
+
+. Navigate to the `ignite-cli-3.0.0-beta2` directory.
+. Run the following command:
++
 [tabs]
 --
 tab:Linux[]
+[source, shell]
 ----
-ignite3-cli-3.0.0-beta1/bin/ignite3
+bin/ignite3
 ----
 
 tab:Windows[]
-NOTE: You need to install Java in Bash environment to run Ignite 3 beta on Windows.
+NOTE: You need to install Java in the Bash environment to run Ignite on Windows.
+[source, bash]
 ----
-bash ignite3-cli-3.0.0-beta1\bin\ignite3
+bash bin\ignite3
 ----
 --
 
-After you start the node, you need to initialize a cluster. To do this, use the `cluster init` command:
+== Initialize Your Cluster
 
+To initialize the cluster that includes the nodes you have started:
+
+. Run the following command:
++ 
 ----
 cluster init -n=sampleCluster -m=defaultNode
 ----
 
-This command creates a simple cluster you can interact with. For example, you can perform sql requests from the command line, or use the thin client to perform requests.
+== Run SQL Statements Against the Cluster
 
-NOTE: If you have issues with Ignite 3 Alpha, check the logs in the `ignite3-db-3.0.0-beta1/log` folder.
+Once your cluster has been initialized, you can manage it with SQL commands:
 
-
-* Use the `CREATE TABLE` statement to create a new table:
+. Use the `CREATE TABLE` statement to create a new table:
 +
 ----
 sql "CREATE TABLE IF NOT EXISTS Person (id int primary key,  city varchar,  name varchar,  age int,  company varchar)"
 ----
 +
-* Fill the table with data by using `INSERT` statement:
+. Fill the table with data using the `INSERT` statement:
 +
 ----
 sql "INSERT INTO Person (id, city, name, age, company) VALUES ('1', 'London', 'John Doe', '42', 'Apache')"
 sql "INSERT INTO Person (id, city, name, age, company) VALUES ('2', 'New York', 'Jane Doe', '36', 'Apache')"
 ----
 +
-* Get all data you inserted on previous step:
+. Get all data you inserted in the previous step:
 +
 ----
 sql "SELECT * FROM Person"
@@ -103,10 +136,38 @@
 
 NOTE: For more information about available SQL statements, see the link:sql-reference/ddl[SQL Reference] section.
 
-== Stopping the Cluster
+== Manage Cluster Configuration
 
-After you are done working on the cluster, you need to stop the node. Go to the `ignite3-db-3.0.0-beta1` folder and run the following command:
+You can change your cluster or node configuration by using the Ignite CLI.
 
+. To see the current configuration, run the following command:
++
+----
+node config show --node-url http://localhost:10300
+----
++  
+This command prints the configuration file in the HOCON format. Note the `maxSize` value under `aimem.regions`.
++
+. Request an increase of the `maxSize` value:
++
+----
+node config update --node-url http://localhost:10300 {aimem.regions:[{name:btree_volatile_region,maxSize:412000000}]}
+----
++
+. To verify the result, run the `show` command again:
++
+----
+node config show --node-url http://localhost:10300
+----
++
+Note that `maxSize` has increased to the value you requested.
+
+== Stop the Cluster
+
+After you are done working with your cluster, you need to stop it:
+
+. Navigate to the `ignite-db-3.0.0-beta2` folder.
+. Run the `stop` command:
 
 [tabs]
 --
@@ -125,10 +186,5 @@
 
 From here, you may want to:
 
-* Check out the link:ignite-cli-tool[Ignite CLI Tool] page for more detail on supported commands.
-* Try out our link:https://github.com/apache/ignite-3/tree/main/examples[examples].
-
-//== Apache Ignite Configuration
-//
-//Apache Ignite uses HOCON configuration format.
-//For more detail, please see the link:https://github.com/lightbend/config/blob/master/HOCON.md[HOCON documentation,window=_blank].
+* Check out the link:ignite-cli-tool[Ignite CLI Tool] page for more detail on supported commands
+* Try out our link:https://github.com/apache/ignite-3/tree/main/examples[examples]
\ No newline at end of file
diff --git a/docs/_docs/rebalance.adoc b/docs/_docs/rebalance.adoc
deleted file mode 100644
index 9a897c5..0000000
--- a/docs/_docs/rebalance.adoc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-= Data Rebalancing
-
-When a new node joins the cluster, some of the partitions are relocated to the new node so that the data remains distributed equally in the cluster. This process is called data rebalancing.
-If an existing node permanently leaves the cluster and backups are not configured, you lose the partitions stored on this node. When backups are configured, one of the backup copies of the lost partitions becomes a primary partition and the rebalancing process is initiated.
-
-WARNING: Data rebalancing is triggered by changes in the Baseline Topology. In pure in-memory clusters, the default behavior is to start rebalancing immediately when a node leaves or joins the cluster (the baseline topology changes automatically). In clusters with persistence, the baseline topology has to be changed manually (default behavior), or can be changed automatically when automatic baseline adjustment is enabled.
diff --git a/docs/_docs/rest/reference.adoc b/docs/_docs/rest/reference.adoc
deleted file mode 100644
index a2613aa..0000000
--- a/docs/_docs/rest/reference.adoc
+++ /dev/null
@@ -1,1675 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-= Ignite REST module
-
-== Endpoints
-
-
-[.ClusterConfiguration]
-=== ClusterConfiguration
-
-
-[.getClusterConfiguration]
-==== getClusterConfiguration
-
-`GET /management/v1/configuration/cluster`
-
-
-
-===== Description
-
-Gets the current configuration of the cluster.
-
-
-// markup not found, no include::{specDir}management/v1/configuration/cluster/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-
-
-
-
-
-===== Return Type
-
-
-<<String>>
-
-
-===== Content Type
-
-* text/plain
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Get cluster configuration
-|  <<String>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 400
-| Incorrect configuration.
-|  <<Problem>>
-
-
-| 404
-| Configuration not found. Most likely, the cluster is not initialized.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/cluster/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/cluster/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/configuration/cluster/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/configuration/cluster/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.getClusterConfigurationByPath]
-==== getClusterConfigurationByPath
-
-`GET /management/v1/configuration/cluster/{path}`
-
-
-
-===== Description
-
-Gets the configuration on the specific path. Configuration is in HOCON format
-
-
-// markup not found, no include::{specDir}management/v1/configuration/cluster/\{path\}/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-====== Path Parameters
-
-[cols="2,3,1,1,1"]
-|===
-|Name| Description| Required| Default| Pattern
-
-| path
-| Configuration tree address. For example: &#x60;element.subelement&#x60;. 
-| X
-| null
-| 
-
-|===
-
-
-
-
-
-
-===== Return Type
-
-
-<<String>>
-
-
-===== Content Type
-
-* text/plain
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Configuration of the cluster on the specified path.
-|  <<String>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 400
-| Incorrect configuration.
-|  <<Problem>>
-
-
-| 404
-| Configuration not found. Most likely, the cluster is not initialized.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/cluster/\{path\}/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/cluster/\{path\}/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/configuration/cluster/{path}/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/configuration/cluster/\{path\}/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.updateClusterConfiguration]
-==== updateClusterConfiguration
-
-`PATCH /management/v1/configuration/cluster`
-
-
-
-===== Description
-
-Updates cluster configuration. New configuration should be provided in HOCON format.
-
-
-// markup not found, no include::{specDir}management/v1/configuration/cluster/PATCH/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-====== Body Parameter
-
-[cols="2,3,1,1,1"]
-|===
-|Name| Description| Required| Default| Pattern
-
-| body
-|  <<string>>
-| X
-| 
-| 
-
-|===
-
-
-
-
-
-===== Return Type
-
-
-<<Object>>
-
-
-===== Content Type
-
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Configuration updated.
-|  <<Object>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 400
-| Incorrect configuration.
-|  <<Problem>>
-
-
-| 404
-| Configuration not found. Most likely, the cluster is not initialized.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/cluster/PATCH/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/cluster/PATCH/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/configuration/cluster/PATCH/PATCH.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/configuration/cluster/PATCH/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.ClusterManagement]
-=== ClusterManagement
-
-
-[.clusterState]
-==== clusterState
-
-`GET /management/v1/cluster/state`
-
-
-
-===== Description
-
-Returns current cluster status.
-
-
-// markup not found, no include::{specDir}management/v1/cluster/state/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-
-
-
-
-
-===== Return Type
-
-<<ClusterState>>
-
-
-===== Content Type
-
-* application/json
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Cluster status returned.
-|  <<ClusterState>>
-
-
-| 404
-| Cluster state not found. Most likely, the cluster is not initialized.
-|  <<Problem>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/cluster/state/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/cluster/state/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/cluster/state/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/cluster/state/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.init]
-==== init
-
-`POST /management/v1/cluster/init`
-
-
-
-===== Description
-
-Initializes a new cluster.
-
-
-// markup not found, no include::{specDir}management/v1/cluster/init/POST/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-====== Body Parameter
-
-[cols="2,3,1,1,1"]
-|===
-|Name| Description| Required| Default| Pattern
-
-| InitCommand
-|  <<InitCommand>>
-| X
-| 
-| 
-
-|===
-
-
-
-
-
-===== Return Type
-
-
-<<Object>>
-
-
-===== Content Type
-
-* application/json
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Cluster initialized.
-|  <<Object>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 400
-| Incorrect configuration.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/cluster/init/POST/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/cluster/init/POST/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/cluster/init/POST/POST.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/cluster/init/POST/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.NodeConfiguration]
-=== NodeConfiguration
-
-
-[.getNodeConfiguration]
-==== getNodeConfiguration
-
-`GET /management/v1/configuration/node`
-
-
-
-===== Description
-
-Gets node configuration in HOCON format.
-
-
-// markup not found, no include::{specDir}management/v1/configuration/node/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-
-
-
-
-
-===== Return Type
-
-
-<<String>>
-
-
-===== Content Type
-
-* text/plain
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Whole node configuration.
-|  <<String>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 400
-| Incorrect configuration.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/node/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/node/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/configuration/node/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/configuration/node/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.getNodeConfigurationByPath]
-==== getNodeConfigurationByPath
-
-`GET /management/v1/configuration/node/{path}`
-
-
-
-===== Description
-
-Gets a configuration of a specific node, in HOCON format.
-
-
-// markup not found, no include::{specDir}management/v1/configuration/node/\{path\}/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-====== Path Parameters
-
-[cols="2,3,1,1,1"]
-|===
-|Name| Description| Required| Default| Pattern
-
-| path
-| Configuration tree address. For example: &#x60;element.subelement&#x60;. 
-| X
-| null
-| 
-
-|===
-
-
-
-
-
-
-===== Return Type
-
-
-<<String>>
-
-
-===== Content Type
-
-* text/plain
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Returned node configuration.
-|  <<String>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 400
-| Incorrect configuration.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/node/\{path\}/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/node/\{path\}/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/configuration/node/{path}/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/configuration/node/\{path\}/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.updateNodeConfiguration]
-==== updateNodeConfiguration
-
-`PATCH /management/v1/configuration/node`
-
-
-
-===== Description
-
-Updates node configuration. New configuration should be provided in HOCON format.
-
-
-// markup not found, no include::{specDir}management/v1/configuration/node/PATCH/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-====== Body Parameter
-
-[cols="2,3,1,1,1"]
-|===
-|Name| Description| Required| Default| Pattern
-
-| body
-|  <<string>>
-| X
-| 
-| 
-
-|===
-
-
-
-
-
-===== Return Type
-
-
-<<Object>>
-
-
-===== Content Type
-
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Configuration successfully updated.
-|  <<Object>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 400
-| Incorrect configuration.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/node/PATCH/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/configuration/node/PATCH/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/configuration/node/PATCH/PATCH.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/configuration/node/PATCH/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.NodeManagement]
-=== NodeManagement
-
-
-[.nodeState]
-==== nodeState
-
-`GET /management/v1/node/state`
-
-
-
-===== Description
-
-Gets current network status.
-
-
-// markup not found, no include::{specDir}management/v1/node/state/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-
-
-
-
-
-===== Return Type
-
-<<NodeState>>
-
-
-===== Content Type
-
-* application/json
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Current node status.
-|  <<NodeState>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/node/state/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/node/state/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/node/state/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/node/state/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.nodeVersion]
-==== nodeVersion
-
-`GET /management/v1/node/version`
-
-
-
-===== Description
-
-Gets the version of Apache Ignite the node uses.
-
-
-// markup not found, no include::{specDir}management/v1/node/version/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-
-
-
-
-
-===== Return Type
-
-
-<<String>>
-
-
-===== Content Type
-
-* text/plain
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Node version.
-|  <<String>>
-
-
-| 500
-| Internal error
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/node/version/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/node/version/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/node/version/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/node/version/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.NodeMetric]
-=== NodeMetric
-
-
-[.disableNodeMetric]
-==== disableNodeMetric
-
-`POST /management/v1/metric/node/disable`
-
-
-
-===== Description
-
-Disables a specific metric source.
-
-
-// markup not found, no include::{specDir}management/v1/metric/node/disable/POST/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-====== Body Parameter
-
-[cols="2,3,1,1,1"]
-|===
-|Name| Description| Required| Default| Pattern
-
-| body
-|  <<string>>
-| X
-| 
-| 
-
-|===
-
-
-
-
-
-===== Return Type
-
-
-
--
-
-===== Content Type
-
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Metric source disabled.
-|  <<>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 404
-| Metric source not found.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/metric/node/disable/POST/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/metric/node/disable/POST/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/metric/node/disable/POST/POST.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/metric/node/disable/POST/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.enableNodeMetric]
-==== enableNodeMetric
-
-`POST /management/v1/metric/node/enable`
-
-
-
-===== Description
-
-Enables a specific metric source.
-
-
-// markup not found, no include::{specDir}management/v1/metric/node/enable/POST/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-====== Body Parameter
-
-[cols="2,3,1,1,1"]
-|===
-|Name| Description| Required| Default| Pattern
-
-| body
-|  <<string>>
-| X
-| 
-| 
-
-|===
-
-
-
-
-
-===== Return Type
-
-
-
--
-
-===== Content Type
-
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Metric source enabled.
-|  <<>>
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-
-| 404
-| Metric source not found.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/metric/node/enable/POST/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/metric/node/enable/POST/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/metric/node/enable/POST/POST.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/metric/node/enable/POST/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.listNodeMetrics]
-==== listNodeMetrics
-
-`GET /management/v1/metric/node`
-
-
-
-===== Description
-
-Provides a list of all available metric sources.
-
-
-// markup not found, no include::{specDir}management/v1/metric/node/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-
-
-
-
-
-===== Return Type
-
-array[<<MetricSource>>]
-
-
-===== Content Type
-
-* application/json
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Returned a list of metric sources.
-| List[<<MetricSource>>] 
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/metric/node/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/metric/node/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/metric/node/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/metric/node/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.Topology]
-=== Topology
-
-
-[.logical]
-==== logical
-
-`GET /management/v1/cluster/topology/logical`
-
-
-
-===== Description
-
-Gets information about logical cluster topology.
-
-
-// markup not found, no include::{specDir}management/v1/cluster/topology/logical/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-
-
-
-
-
-===== Return Type
-
-array[<<ClusterNode>>]
-
-
-===== Content Type
-
-* application/json
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Logical topology returned.
-| List[<<ClusterNode>>] 
-
-
-| 404
-| Logical topology not found. Most likely, the cluster is not initialized.
-|  <<Problem>>
-
-
-| 500
-| Internal error
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/cluster/topology/logical/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/cluster/topology/logical/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/cluster/topology/logical/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/cluster/topology/logical/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[.physical]
-==== physical
-
-`GET /management/v1/cluster/topology/physical`
-
-
-
-===== Description
-
-Gets information about physical cluster topology.
-
-
-// markup not found, no include::{specDir}management/v1/cluster/topology/physical/GET/spec.adoc[opts=optional]
-
-
-
-===== Parameters
-
-
-
-
-
-
-
-===== Return Type
-
-array[<<ClusterNode>>]
-
-
-===== Content Type
-
-* application/json
-* application/problem+json
-
-===== Responses
-
-.http response codes
-[cols="2,3,1"]
-|===
-| Code | Message | Datatype
-
-
-| 200
-| Physical topology returned.
-| List[<<ClusterNode>>] 
-
-
-| 500
-| Internal error.
-|  <<Problem>>
-
-|===
-
-===== Samples
-
-
-// markup not found, no include::{snippetDir}management/v1/cluster/topology/physical/GET/http-request.adoc[opts=optional]
-
-
-// markup not found, no include::{snippetDir}management/v1/cluster/topology/physical/GET/http-response.adoc[opts=optional]
-
-
-
-// file not found, no * wiremock data link :management/v1/cluster/topology/physical/GET/GET.json[]
-
-
-ifdef::internal-generation[]
-===== Implementation
-
-// markup not found, no include::{specDir}management/v1/cluster/topology/physical/GET/implementation.adoc[opts=optional]
-
-
-endif::internal-generation[]
-
-
-[#models]
-== Models
-
-
-[#ClusterNode]
-=== _ClusterNode_ 
-
-Information about the cluster node.
-
-[.fields-ClusterNode]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| id
-| 
-| String 
-| Node ID.
-|  
-
-| name
-| 
-| String 
-| Unique cluster name.
-|  
-
-| address
-| 
-| NetworkAddress 
-| 
-|  
-
-| metadata
-| 
-| NodeMetadata 
-| 
-|  
-
-|===
-
-
-[#ClusterState]
-=== _ClusterState_ 
-
-Information about current cluster state.
-
-[.fields-ClusterState]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| cmgNodes
-| 
-| List  of <<string>>
-| List of cluster management group nodes. These nodes are responsible for maintaining RAFT cluster topology.
-|  
-
-| msNodes
-| 
-| List  of <<string>>
-| List of metastorage nodes. These nodes are responsible for storing RAFT cluster metadata.
-|  
-
-| igniteVersion
-| 
-| String 
-| Version of Apache Ignite that the cluster was created on.
-|  
-
-| clusterTag
-| 
-| ClusterTag 
-| 
-|  
-
-|===
-
-
-[#ClusterTag]
-=== _ClusterTag_ 
-
-Unique tag that identifies the cluster.
-
-[.fields-ClusterTag]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| clusterId
-| 
-| UUID 
-| Unique cluster UUID. Generated automatically.
-| uuid 
-
-| clusterName
-| 
-| String 
-| Unique cluster name.
-|  
-
-|===
-
-
-[#InitCommand]
-=== _InitCommand_ 
-
-
-
-[.fields-InitCommand]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| metaStorageNodes
-| 
-| List  of <<string>>
-| A list of RAFT metastorage nodes.
-|  
-
-| cmgNodes
-| 
-| List  of <<string>>
-| A list of RAFT cluster management nodes.
-|  
-
-| clusterName
-| 
-| String 
-| The name of the cluster.
-|  
-
-|===
-
-
-[#InvalidParam]
-=== _InvalidParam_ 
-
-Information about invalid request parameter.
-
-[.fields-InvalidParam]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| name
-| 
-| String 
-| Parameter name.
-|  
-
-| reason
-| 
-| String 
-| The issue with the parameter.
-|  
-
-|===
-
-
-[#MetricSource]
-=== _MetricSource_ 
-
-Metric sources provided by modules.
-
-[.fields-MetricSource]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| name
-| 
-| String 
-| Metric source name.
-|  
-
-| enabled
-| 
-| Boolean 
-| If True, the metric is tracked. Otherwise, the metric is not tracked.
-|  
-
-|===
-
-
-[#NetworkAddress]
-=== _NetworkAddress_ 
-
-Node network address information.
-
-[.fields-NetworkAddress]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| host
-| 
-| String 
-| Name of the host node is on.
-|  
-
-| port
-| 
-| Integer 
-| Port the node runs on.
-| int32 
-
-|===
-
-
-[#NodeMetadata]
-=== _NodeMetadata_ 
-
-Node metadata information.
-
-[.fields-NodeMetadata]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| restHost
-| 
-| String 
-| The host exposed to REST API.
-|  
-
-| httpPort
-| 
-| Integer 
-| The HTTP port exposed to REST API.
-| int32 
-
-| httpsPort
-| 
-| Integer 
-| The HTTPS port exposed to REST API.
-| int32 
-
-|===
-
-
-[#NodeState]
-=== _NodeState_ 
-
-Node state.
-
-[.fields-NodeState]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| name
-| 
-| String 
-| Unique node name.
-|  
-
-| state
-| 
-| State 
-| 
-|  
-
-|===
-
-
-[#Problem]
-=== _Problem_ 
-
-Extended description of the problem with the request.
-
-[.fields-Problem]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-| title
-| 
-| String 
-| Short summary of the issue.
-|  
-
-| status
-| 
-| Integer 
-| Returned HTTP status code.
-| int32 
-
-| code
-| 
-| String 
-| Ignite 3 error code.
-|  
-
-| type
-| 
-| String 
-| URI to documentation regarding the issue.
-|  
-
-| detail
-| 
-| String 
-| Extended explanation of the issue.
-|  
-
-| node
-| 
-| String 
-| Name of the node the issue happened on.
-|  
-
-| traceId
-| 
-| UUID 
-| Unique issue identifier. Thid identifier can be used to find logs related to the issue
-| uuid 
-
-| invalidParams
-| 
-| List  of <<InvalidParam>>
-| Parameters that did not pass validation.
-|  
-
-|===
-
-
-[#State]
-=== _State_ 
-
-Possible node statuses.
-
-[.fields-State]
-[cols="2,1,2,4,1"]
-|===
-| Field Name| Required| Type| Description| Format
-
-|===
-
-
diff --git a/docs/_docs/sql-reference/data-types.adoc b/docs/_docs/sql-reference/data-types.adoc
new file mode 100644
index 0000000..288a772
--- /dev/null
+++ b/docs/_docs/sql-reference/data-types.adoc
@@ -0,0 +1,59 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Data Types
+
+
+This page lists the SQL data types available in Ignite, such as string, numeric, and date/time types.
+
+Every SQL type is mapped to a programming language or driver-specific type that Ignite supports natively.
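+
+For example, when inserting data through the Java Table API, Java types map onto these SQL types. The snippet below is a hedged sketch that assumes a hypothetical `Event` table with INTEGER, VARCHAR, DATE, and UUID columns and the Tuple-based record view:
+
+[source, java]
+----
+// Hypothetical table: CREATE TABLE Event (id INT PRIMARY KEY, name VARCHAR, day DATE, tag UUID)
+Tuple event = Tuple.create()
+    .set("id", 1)                          // INTEGER -> int / Integer
+    .set("name", "conference")             // VARCHAR -> String
+    .set("day", LocalDate.of(2024, 5, 1))  // DATE    -> java.time.LocalDate
+    .set("tag", UUID.randomUUID());        // UUID    -> java.util.UUID
+
+ignite.tables().table("Event").recordView().upsert(null, event);
+----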
+
+== BOOLEAN
+Possible values: TRUE and FALSE.
+
+== BIGINT
+Possible values: [`-9223372036854775808`, `9223372036854775807`].
+
+== DECIMAL
+Possible values: Exact number of selectable precision.
+
+== DOUBLE
+Possible values: A floating point number.
+
+== INTEGER
+Possible values: [`-2147483648`, `2147483647`].
+
+== REAL
+Possible values: A single precision floating point number.
+
+== SMALLINT
+Possible values: [`-32768`, `32767`].
+
+== VARCHAR
+Possible values: A Unicode String.
+
+== VARBINARY
+Possible values: binary data (byte array).
+
+== DATE
+Possible values: The date data type. The format is `yyyy-MM-dd`.
+
+== TIME
+Possible values: The time data type. The format is `hh:mm:ss`.
+
+== TIMESTAMP
+Possible values: The timestamp data type. The format is `yyyy-MM-dd hh:mm:ss[.nnnnnnnnn]`.
+
+== UUID
+Possible values: Universally unique identifier. This is a 128 bit value.
\ No newline at end of file
diff --git a/docs/_docs/sql-reference/ddl.adoc b/docs/_docs/sql-reference/ddl.adoc
index 29f9650..3fc6790 100644
--- a/docs/_docs/sql-reference/ddl.adoc
+++ b/docs/_docs/sql-reference/ddl.adoc
@@ -14,25 +14,26 @@
 // limitations under the License.
 = Data Definition Language (DDL)
 
-This section walks you through all data definition language (DDL) commands supported by Apache Ignite 3.0.
+This section walks you through all data definition language (DDL) commands supported by Ignite 3.
 
 == CREATE TABLE
 
 Creates a new table.
 
+NOTE: This can also be done via the link:developers-guide/java-to-tables[Java API].
 
 [.diagram-container]
 Diagram(
-NonTerminal('CREATE'),
-NonTerminal('TABLE'),
-Optional('IF NOT EXISTS'),
-NonTerminal('table_name'),
-NonTerminal('('),
+Terminal('CREATE'),
+Terminal('TABLE'),
+Optional(Terminal('IF NOT EXISTS')),
+NonTerminal('table_name', {href:'./grammar-reference/#qualified_table_name'}),
+Terminal('('),
 Choice(1,
-NonTerminal('constraint'),
-NonTerminal('column_definition'),
-NonTerminal(',')),
-NonTerminal(')'),
+NonTerminal('constraint', {href:'./grammar-reference/#constraint'}),
+NonTerminal('column_definition', {href:'./grammar-reference/#column_definition'}),
+Terminal(',')),
+Terminal(')'),
 End({type:'complex'})
 )
 
@@ -41,33 +42,35 @@
 Start({type:'complex'}),
 Optional(
 Sequence(
-NonTerminal('COLOCATE'),
+Terminal('COLOCATE'),
 Optional('BY'),
-NonTerminal('column_list')
+NonTerminal('column_list', {href:'./grammar-reference/#column_list'})
 )
 ),
 Optional(
 Sequence(
-NonTerminal('WITH'),
-OneOrMore('param_name')
+Terminal('WITH'),
+OneOrMore (NonTerminal('parameter', {href:'./grammar-reference/#parameter'}), Terminal(','))
+)
+),
+Optional(
+Sequence(
+Terminal('EXPIRE'),
+Terminal('AT'),
+NonTerminal('expiry_column_name')
 )
 )
 )
 
-
-Parameters:
+Keywords and parameters:
 
 * `table_name` - name of the table. Can be schema-qualified.
-* `table_column` - name and type of a column to be created in the new table.
 * `IF NOT EXISTS` - create the table only if a table with the same name does not exist.
-* `COLOCATED BY` - colocation key. The key can be composite. Primary key must include colocation key. Was `affinity_key` in Ignite 2.x.
-* `WITH` - accepts the following additional parameters:
-
-** `Replicas` - sets the number of partition copies, including the master copy.
-** `Partitions` - sets the number of table partitions.
-** `PRIMARY_ZONE` - sets the link:distribution-zones[Distriburion Zone].
-** Other parameters, depending on the database engine.
-
+* `COLOCATE BY` - colocation key. The key can be composite. Primary key must include colocation key. Was `affinity_key` in Ignite 2.x.
+* `WITH` - accepts additional parameters; currently, accepts only:
+** `PRIMARY_ZONE` - sets the link:sql-reference/distribution-zones[Distribution Zone].
+* `EXPIRE AT` - allows specifying a column with a point in time when a record should be deleted. 
+* `expiry_column_name` - name of the column that contains values on which the record expiry is based.
 
 Examples:
 
@@ -80,115 +83,90 @@
   city_id int,
   name varchar,
   age int,
-  company varchar,
+  company varchar
 )
 ----
-//
-== ALTER TABLE
-//
-Modifies the structure of an existing table.
-//
-//[NOTE]
-//====
-//[discrete]
-//=== Scope of ALTER TABLE
-//Presently, Apache Ignite only supports addition and removal of columns.
-//====
-//
-//* *ALTER TABLE IF EXISTS table LOGGING*
-//
-//[source,sql]
-//----
-//ALTER TABLE [IF EXISTS] tableName {LOGGING}
-//----
-//
-//Parameters:
-//
-//- `IF EXISTS` - if applied to `TABLE`, do not throw an error if a table with the specified table name does not exist. If applied to `COLUMN`, do not throw an error if a column with the specified name does not exist.
-//- `qualified_table_name` - the name of the table.
-//- `LOGGING` - enables write-ahead logging for the table. The command is relevant only if Ignite link:https://ignite.apache.org/docs/3.0.0-alpha/persistence[persistence,window=_blank] is used.
-//
-//image::images/svg/alter_table_logging_bnf.svg[Embedded,opts=inline]
-//
-//* *ALTER TABLE IF EXISTS table NOLOGGING*
-//
-//[source,sql]
-//----
-//ALTER TABLE [IF EXISTS] tableName {NOLOGGING}
-//----
-//
-//Parameters:
-//
-//- `IF EXISTS` - if applied to `TABLE`, do not throw an error if a table with the specified table name does not exist. If applied to `COLUMN`, do not throw an error if a column with the specified name does not exist.
-//- `qualified_table_name` - the name of the table.
-//- `NOLOGGING` - disables write-ahead logging for the table. The command is relevant only if Ignite link:https://ignite.apache.org/docs/3.0.0-alpha/persistence[persistence,window=_blank] is used.
-//
-//image::images/svg/alter_table_no_logging_bnf.svg[Embedded,opts=inline]
-//
 
-==== ALTER TABLE IF EXISTS table ADD COLUMN (column1 int, column2 int)
+Creates a Person table that uses distribution zone `MYZONE`:
+
+[source,sql]
+----
+CREATE TABLE IF NOT EXISTS Person (
+  id int primary key,
+  city_id int,
+  name varchar,
+  age int,
+  company varchar
+) WITH PRIMARY_ZONE=`MYZONE`
+----
+
+Creates a Person table where the records expire at timestamps in the `ttl` column:
+
+[source,sql]
+----
+CREATE TABLE IF NOT EXISTS Person ( 
+  id int PRIMARY KEY,
+  name varchar,
+  ttl timestamp
+) EXPIRE AT ttl
+----
+
+Creates a Person table where the default value of the `city_id` column is 1:
+
+[source,sql]
+----
+CREATE TABLE IF NOT EXISTS Person (
+  id int primary key,
+  city_id int default 1,
+  name varchar,
+  age int,
+  company varchar
+)
+----
+
+Creates a Person table with the `duration` column automatically set a week into the future:
+
+[source,sql]
+----
+CREATE TABLE IF NOT EXISTS Person (
+  id int primary key,
+  city_id int,
+  name varchar,
+  age int,
+  company varchar,
+  duration timestamp default CURRENT_TIMESTAMP + INTERVAL '1' WEEK
+)
+----
+
+== ALTER TABLE
+
+Modifies the structure of an existing table.
+
+=== ALTER TABLE IF EXISTS table ADD COLUMN (column1 int, column2 int)
+
+Adds column(s) to an existing table.
 
 [.diagram-container]
 Diagram(
-  NonTerminal('ALTER TABLE'),
-  Optional('IF EXISTS'),
-  NonTerminal('qualified_table_name'),
-  NonTerminal('ADD'),
-  Optional('COLUMN'),
+  Terminal('ALTER TABLE'),
+  Optional(Terminal('IF EXISTS')),
+  NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'}),
+  Terminal('ADD'),
+  Optional(Terminal('COLUMN')),
 End({type:'complex'})
 )
 
 [.diagram-container]
 Diagram(
   Start({type:'complex'}),
-  NonTerminal('column_definition_or_list')
+  NonTerminal('column_definition_or_list', {href:'./grammar-reference/#column_definition_or_list'})
 )
 
+Keywords and parameters:
 
-Parameters:
-
-- `IF EXISTS` - if applied to `TABLE`, do not throw an error if a table with the specified table name does not exist. If applied to `COLUMN`, do not throw an error if a column with the specified name does not exist.
-- `qualified_table_name` - the name of the table.
-- `ADD` - adds a new column or several columns to a previously created table. Once a column is added, it can be accessed using link:sql-reference/dml[DML commands,window=_blank] and indexed with the CREATE INDEX statement.
-- `COLUMN` - name of the defined column.
-- `column_definition_or_list` - definition of the column to add to the table.
-
-
-
-==== ALTER TABLE IF EXISTS table DROP COLUMN (column1, column2 int)
-
-[.diagram-container]
-Diagram(
-NonTerminal('ALTER TABLE'),
-Optional('IF EXISTS'),
-NonTerminal('qualified_table_name'),
-NonTerminal('DROP'),
-Optional('COLUMN'),
-End({type:'complex'})
-)
-
-[.diagram-container]
-Diagram(
-Start({type:'complex'}),
-NonTerminal('column_list')
-)
-
-
-Parameters:
-
-- `IF EXISTS` - if applied to `TABLE`, do not throw an error if a table with the specified table name does not exist. If applied to `COLUMN`, do not throw an error if a column with the specified name does not exist.
-- `qualified_table_name` - the name of the table.
-- `DROP` - removes an existing column or multiple columns from a table. Once a column is removed, it cannot be accessed within queries. Consider the following notes and limitations:
-- `column_list` - the name of the list of columns to be removed.
-
-
-- If the column was indexed, the index has to be dropped manually in advance by using the 'DROP INDEX' command.
-- It is not possible to remove a column if it represents the whole value stored in the cluster. The limitation is relevant for primitive values.
-Ignite stores data in the form of key-value pairs and all the new columns will belong to the value. It's not possible to change a set of columns of the key (`PRIMARY KEY`).
-
-//Both DDL and DML commands targeting the same table are blocked for a short time until `ALTER TABLE` is in progress.
-
-Schema changes applied by this command are persisted on disk. Thus, the changes can survive full cluster restarts.
+* `IF EXISTS` - do not throw an error if a table with the specified table name does not exist.
+* `ADD` - adds a new column or several columns to a previously created table. Once a column is added, it can be accessed using link:sql-reference/dml[DML commands,window=_blank] and indexed with the CREATE INDEX statement.
+* `COLUMN` - name of the defined column.
 
 Examples:
 
@@ -213,6 +191,42 @@
 ALTER TABLE Region ADD COLUMN (code varchar, gdp double);
 ----
 
+=== ALTER TABLE IF EXISTS table DROP COLUMN (column1, column2 int)
+
+Removes column(s) from an existing table.
+
+[.diagram-container]
+Diagram(
+Terminal('ALTER TABLE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'}),
+Terminal('DROP'),
+Optional(Terminal('COLUMN')),
+End({type:'complex'})
+)
+
+[.diagram-container]
+Diagram(
+Start({type:'complex'}),
+NonTerminal('column_list', {href:'./grammar-reference/#column_list'})
+)
+
+Keywords and parameters:
+
+* `IF EXISTS` - do not throw an error if a table with the specified table name does not exist.
+* `DROP` - removes an existing column or multiple columns from a table. Once a column is removed, it cannot be accessed within queries. Consider the following notes and limitations:
+** If the column was indexed, the index has to be dropped manually in advance by using the `DROP INDEX` command.
+** It is not possible to remove a column if it represents the whole value stored in the cluster. The limitation is relevant for primitive values. Ignite stores data in the form of key-value pairs, and all new columns belong to the value. It is not possible to change the set of columns of the key (`PRIMARY KEY`).
+* `COLUMN` - name of the defined column.
+//Both DDL and DML commands targeting the same table are blocked for a short time until `ALTER TABLE` is in progress.
+
+Schema changes applied by this command are persisted on disk. Thus, the changes can survive full cluster restarts.
+
+Examples:
+
 Drop a column from the table:
 
 [source,sql]
@@ -234,35 +248,222 @@
 ALTER TABLE Person DROP COLUMN (code, gdp);
 ----
 
-//Disable write-ahead logging:
-//
-//[source,sql]
-//----
-//ALTER TABLE Person NOLOGGING
-//----
+=== ALTER TABLE IF EXISTS table ALTER COLUMN column SET DATA TYPE
+
+Modifies column(s) in an existing table.
+
+[.diagram-container]
+Diagram(
+Terminal('ALTER TABLE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'}),
+Terminal('ALTER COLUMN'),
+NonTerminal('column_name', {href:'./grammar-reference/#column_name'}),
+End({type:'complex'})
+)
+
+
+[.diagram-container]
+Diagram(
+Start({type:'complex'}),
+Terminal('SET DATA TYPE'),
+NonTerminal('data_type'),
+NonTerminal('('),
+Optional(
+Sequence(
+Choice (0,'NOT NULL','NULLABLE')
+)
+)
+)
+
+Keywords and parameters:
+
+* `IF EXISTS` - do not throw an error if a table with the specified table name does not exist.
+* `ALTER COLUMN` - alter the defined column.
+* `SET DATA TYPE` - set data type of the defined column.
+* `data_type` - a valid link:sql-reference/data-types[data type]. If the data type supports arguments, you can specify them as well.
+
+Examples:
+
+Change the data type of a column in the table:
+
+[source,sql]
+----
+ALTER TABLE Person ALTER COLUMN city SET DATA TYPE varchar;
+----
+
+Increase the length of a varchar column in the Region table:
+
+[source,sql]
+----
+ALTER TABLE Region ALTER COLUMN code SET DATA TYPE varchar(32);
+----
+
+==== Supported Transitions
+
+Not all data type transitions are supported. The limitations are listed below:
+
+* `FLOAT` can be transitioned to `DOUBLE`
+* `INT8`, `INT16` and `INT64` can be transitioned to `INT32`
+* `TYPE SCALE` change is forbidden
+* `TYPE PRECISION` increase is allowed for a non-PK DECIMAL column
+* `TYPE LENGTH` increase is allowed for non-PK STRING and BYTE_ARRAY columns
+
+Other transitions are not supported.
+
+Examples:
+
+Changes the range of possible ID values to the BIGINT range:
+
+[source,sql]
+----
+ALTER TABLE cities ALTER COLUMN ID SET DATA TYPE BIGINT
+----
+
+Sets the maximum length of the `name` column to 11 characters:
+
+[source,sql]
+----
+ALTER TABLE cities ALTER COLUMN name SET DATA TYPE varchar(11)
+----
+
+=== ALTER TABLE IF EXISTS table ALTER COLUMN column SET NOT NULL
+
+[.diagram-container]
+Diagram(
+Terminal('ALTER TABLE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'}),
+Terminal('ALTER COLUMN'),
+NonTerminal('column_name', {href:'./grammar-reference/#column_name'}),
+End({type:'complex'})
+)
+
+
+[.diagram-container]
+Diagram(
+Start({type:'complex'}),
+Terminal('SET NOT NULL'),
+End({type:'simple'})
+)
+
+Keywords and parameters:
+
+* `IF EXISTS` - do not throw an error if a table with the specified table name does not exist.
+* `ALTER COLUMN` - alter the defined column.
+* `SET NOT NULL` - specifies that the defined column cannot contain null values.
+
+==== Supported Transitions
+
+Not all data type transitions are supported. The limitations are listed below:
+
+* `NULLABLE` to `NOT NULL` transition is forbidden
+
+
+=== ALTER TABLE IF EXISTS table ALTER COLUMN column DROP NOT NULL
+
+[.diagram-container]
+Diagram(
+Terminal('ALTER TABLE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'}),
+Terminal('ALTER COLUMN'),
+NonTerminal('column_name', {href:'./grammar-reference/#column_name'}),
+End({type:'complex'})
+)
+
+
+[.diagram-container]
+Diagram(
+Start({type:'complex'}),
+Terminal('DROP NOT NULL'),
+End({type:'simple'})
+)
+
+Keywords and parameters:
+
+* `IF EXISTS` - do not throw an error if a table with the specified table name does not exist.
+* `ALTER COLUMN` - alter the defined column.
+* `DROP NOT NULL` - removes the `NOT NULL` constraint from the defined column, making it nullable.
+
+==== Supported Transitions
+
+Not all data type transitions are supported. The limitations are listed below:
+
+* `NOT NULL` to `NULLABLE` transition is allowed for any non-PK column
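+
+Example:
+
+A minimal sketch based on the diagram above; it assumes the `company` column of the `Person` table (from the CREATE TABLE examples) was previously declared `NOT NULL`:
+
+[source,sql]
+----
+-- assumes company was previously declared NOT NULL
+ALTER TABLE Person ALTER COLUMN company DROP NOT NULL;
+----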
+
+=== ALTER TABLE IF EXISTS table ALTER COLUMN column SET DEFAULT
+
+[.diagram-container]
+Diagram(
+Terminal('ALTER TABLE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'}),
+Terminal('ALTER COLUMN'),
+NonTerminal('column_name', {href:'./grammar-reference/#column_name'}),
+End({type:'complex'})
+)
+
+
+[.diagram-container]
+Diagram(
+Start({type:'complex'}),
+Terminal('SET DEFAULT'),
+NonTerminal('default_value'),
+End({type:'simple'})
+)
+
+Keywords and parameters:
+
+* `IF EXISTS` - do not throw an error if a table with the specified table name does not exist.
+* `ALTER COLUMN` - alter the defined column.
+* `SET DEFAULT` - set the defined column's default value to the provided one.
+* `default_value` - new default value for the column.
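+
+Example:
+
+A minimal sketch that sets the default value of the `city_id` column in the `Person` table (from the CREATE TABLE examples) to 1:
+
+[source,sql]
+----
+ALTER TABLE Person ALTER COLUMN city_id SET DEFAULT 1;
+----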
+
+=== ALTER TABLE IF EXISTS table ALTER COLUMN column DROP DEFAULT
+
+[.diagram-container]
+Diagram(
+Terminal('ALTER TABLE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'}),
+Terminal('ALTER COLUMN'),
+NonTerminal('column_name', {href:'./grammar-reference/#column_name'}),
+End({type:'complex'})
+)
+
+
+[.diagram-container]
+Diagram(
+Start({type:'complex'}),
+Terminal('DROP DEFAULT'),
+End({type:'simple'})
+)
+
+Keywords and parameters:
+
+* `IF EXISTS` - do not throw an error if a table with the specified table name does not exist.
+* `ALTER COLUMN` - alter the defined column.
+* `DROP DEFAULT` - drops the default value for the defined column.
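+
+Example:
+
+A minimal sketch that removes the default value from the `city_id` column of the `Person` table (assuming a default was set as in the SET DEFAULT example above):
+
+[source,sql]
+----
+ALTER TABLE Person ALTER COLUMN city_id DROP DEFAULT;
+----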
 
 == DROP TABLE
 
 The `DROP TABLE` command drops an existing table.
 
+NOTE: This can also be done via the link:developers-guide/java-to-tables[Java API].
 
 [.diagram-container]
 Diagram(
-NonTerminal('DROP TABLE'),
-Optional('IF EXISTS'),
-NonTerminal('qualified_table_name')
+Terminal('DROP TABLE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'})
 )
 
+Keywords and parameters:
 
-
-Parameters:
-
-- `IF EXISTS` - do not throw an error if a table with the same name does not exist.
-- `qualified_table_name` - the name of the table. Can be schema-qualified.
-
-
-//Both DDL and DML commands targeting the same table are blocked while the `DROP TABLE` is in progress.
-//Once the table is dropped, all pending commands will fail with appropriate errors.
+* `IF EXISTS` - do not throw an error if a table with the same name does not exist.
 
 Schema changes applied by this command are persisted on disk. Thus, the changes can survive full cluster restarts.
 
@@ -275,87 +476,100 @@
 DROP TABLE IF EXISTS "Person";
 ----
 
-== DESCRIBE TABLE
-
-Returns information about the distribution zones of the table.
-
-[.diagram-container]
-Diagram(
-NonTerminal('DESCRIBE TABLE'),
-Optional('IF NOT EXISTS'),
-NonTerminal('qualified_table_name'),
-End({type:'complex'})
-)
-
-Parameters:
-
-- `IF EXISTS` - do not throw an error if a table with the same name does not exist.
-- `qualified_table_name` - the name of the table. Can be schema-qualified.
-
 == CREATE INDEX
 
 Creates a new index.
 
+NOTE: This can also be done via the link:developers-guide/java-to-tables[Java API].
+
+When you create a new index, it starts building only after all transactions that were started before the index creation have completed. The index build will not start if there are any "hung" transactions in the logical topology of the cluster.
+
+The index status, with the status reason description (e.g., PENDING - "Waiting for transaction ABC to complete"), is reflected in the system view.
+
+
 NOTE: The index cannot include the same column more than once.
 
 [.diagram-container]
 Diagram(
-  NonTerminal('CREATE INDEX'),
-  Optional('IF NOT EXISTS'),
+  Terminal('CREATE INDEX'),
+  Optional(Terminal('IF NOT EXISTS')),
   NonTerminal('name'),
-  NonTerminal('ON'),
-  NonTerminal('qualified_table_name'),
+  Terminal('ON'),
+  NonTerminal('qualified_table_name', {href:'./grammar-reference/#qualified_table_name'}),
   End({type:'complex'})
 )
 
 [.diagram-container]
 Diagram(
-  Start({type:'complex'}),
-  Optional(
-   Sequence(
-    NonTerminal('USING'),
-    Choice (0,'HASH','SORTED')
-   )
-  ),
-  Optional('column_definition')
+Start({type:'complex'}),
+Sequence(
+Choice (0,
+Sequence(
+Terminal('USING'),
+Choice (0,
+Sequence('TREE',NonTerminal ('sorted_column_list', {href:'./grammar-reference/#sorted_column_list'})),
+Sequence('HASH',NonTerminal ('column_list', {href:'./grammar-reference/#column_list'}))
+),
+),
+NonTerminal ('sorted_column_list', {href:'./grammar-reference/#sorted_column_list'})
+)),
+End({type:'simple'})
 )
 
 
-Parameters:
+Keywords and parameters:
 
-
-* `name` - name of the index.
-* `qualified_table_name` - name of the table to create the index on. Can be schema-qualified.
 * `IF NOT EXISTS` - create the index only if an index with the same name does not exist.
-* `USING` - specifies whether the command creates a sorted index or a hash index. Possible values: `HASH`, `SORTED`. Default is `SORTED`.
+* `name` - name of the index.
+* `ON` - create index on the defined table.
+* `USING TREE` - if specified, creates a tree index.
+* `USING HASH` - if specified, creates a hash index.
 
 
-//NOTE: Add image
-
 Examples:
 
-Create index Persons for Person table:
+Create an index `department_name_idx` for the Person table:
 
 [source,sql]
 ----
-CREATE INDEX IF NOT EXISTS Persons ON Person (column1)
+CREATE INDEX IF NOT EXISTS department_name_idx ON Person (department_id DESC, name ASC);
+----
+
+Create a hash index `name_surname_idx` for the Person table:
+
+[source,sql]
+----
+CREATE INDEX name_surname_idx ON Person USING HASH (name, surname);
+----
+
+Create a tree index `department_city_idx` for the Person table:
+
+[source,sql]
+----
+CREATE INDEX department_city_idx ON Person USING TREE (department_id ASC, city_id DESC);
 ----
 
 == DROP INDEX
 
+Drops an index.
+
+NOTE: This can also be done via the link:developers-guide/java-to-tables[Java API].
+
+When you drop an index, it stays in the STOPPING status until all transactions that were started before the DROP INDEX command have completed (even those that do not affect any of the tables for which the index is being dropped).
+Once all such transactions have completed, the space the dropped index occupied is freed up only when the LWM of the relevant partition becomes greater than the time when the index drop was activated.
+The index status, with the status reason description (e.g., PENDING - "Waiting for transaction ABC to complete"), is reflected in the system view.
+
 [.diagram-container]
 Diagram(
-NonTerminal('DROP INDEX'),
-Optional('IF EXISTS'),
+Terminal('DROP INDEX'),
+Optional(Terminal('IF EXISTS')),
 NonTerminal('index_name')
 )
 
-Parameters:
+Keywords and parameters:
 
-- `index_name` - the name of the index.
-- `IF EXISTS` - do not throw an error if an index with the specified name does not exist.
-
-
+* `index_name` - the name of the index.
+* `IF EXISTS` - do not throw an error if an index with the specified name does not exist.
 
 Examples:
 
@@ -366,153 +580,57 @@
 DROP INDEX IF EXISTS Persons;
 ----
 
-== Grammar Reference
 
-=== column_definition_or_list
+== CREATE CACHE
+
+Creates a new link:developers-guide/cache[cache].
 
 [.diagram-container]
 Diagram(
-Group(Sequence(
-Optional('('),
-Choice(0,Sequence(
-NonTerminal('column_name'),
-NonTerminal('data_type')),
-NonTerminal(',')
-),
-Optional(')')
-), 'column_definition_or_list')
-)
-
-
-Referenced by:
-
-* link:sql-reference/ddl#alter-table[ALTER TABLE]
-
-'''
-
-=== column_list
-
-
-[.diagram-container]
-Diagram(Group(Sequence(
-Optional('('),
-OneOrMore('column_name', ','),
-Optional(')')))
-)
-
-
-Referenced by:
-
-* link:sql-reference/ddl#alter-table[ALTER TABLE]
-
-'''
-
-=== constraint
-
-[.diagram-container]
-Diagram(Group(Sequence(
-Optional(Sequence(NonTerminal('Constraint'),NonTerminal('constraint_name')
-)),
-NonTerminal('PRIMARY KEY'),
-NonTerminal('('),
-OneOrMore('column_name', ','),
-NonTerminal(')')
-),'constraint')
-)
-
-
-Referenced by:
-
-* link:sql-reference/ddl#create-table[CREATE TABLE]
-
-'''
-
-=== qualified_table_name
-
-[.diagram-container]
-Diagram(Group(Sequence(
-Optional(Sequence(NonTerminal('schema'),NonTerminal('.')
-),),
-NonTerminal('table_name')
-),'qualified_table_name')
-)
-
-Referenced by:
-
-* link:sql-reference/ddl#сreate-table[CREATE TABLE]
-* link:sql-reference/ddl#alter-table[ALTER TABLE]
-* link:sql-reference/ddl#drop-table[DROP TABLE]
-
-'''
-
-=== column_definition
-//NOTE: Replace code with image
-
-[.diagram-container]
-Diagram(
-Group(Sequence(
-NonTerminal('column_name'),
-NonTerminal('DATA TYPE'),
-Optional(Sequence(Optional('NOT'),NonTerminal('NULL')))
-), 'column_definition'),
+Terminal('CREATE'),
+Terminal('CACHE'),
+Optional(Terminal('IF NOT EXISTS')),
+NonTerminal('cache_name', {href:'./grammar-reference/#qualified_table_name'}),
+Terminal('('),
+Choice(1,
+NonTerminal('constraint', {href:'./grammar-reference/#constraint'}),
+NonTerminal('column_definition', {href:'./grammar-reference/#column_definition'}),
+Terminal(',')),
+Terminal(')'),
 End({type:'complex'})
 )
 
 [.diagram-container]
 Diagram(
 Start({type:'complex'}),
-Group(Sequence(
-Optional(Sequence(NonTerminal('DEFAULT'), NonTerminal('literal_value'))),
-NonTerminal('PRIMARY KEY')
-), 'column_definition')
+Optional(
+Sequence(
+Terminal('COLOCATE'),
+Optional('BY'),
+NonTerminal('column_list', {href:'./grammar-reference/#column_list'})
+)
+),
+Optional(
+Sequence(
+Terminal('WITH'),
+OneOrMore (NonTerminal('parameter', {href:'./grammar-reference/#parameter'}), Terminal(','))
+)
+),
+Optional(
+Sequence(
+Terminal('EXPIRE'),
+Terminal('AT'),
+NonTerminal('expiry_column_name')
+)
+)
 )
 
+Keywords and parameters:
 
-Parameters:
-
-* `ASC` or `DESC` - specifies that the column should be sorted in an ascending or descending order respectively.
-* `NULLS` - specifies whether null values will be at the start or at the end of the index. Possible values: `FIRST`, `LAST`.
-
-Referenced by:
-
-* link:sql-reference/ddl#сreate-index[CREATE INDEX]
-
-'''
-
-=== tableColumn
-//NOTE: Replace code with image
-
-
-[.diagram-container]
-Diagram(Group(Sequence(
-NonTerminal('columnName'),
-NonTerminal('columnType'),
-Optional(Sequence(Optional('NOT'),NonTerminal('NULL')
-),),
-Optional('DEFAULT')
-),'tableColumn')
-)
-
-Parameters:
-
-* `[NOT] NULL` - specifies that values in the column always contain null or not null values.
-* `DEFAULT` - specifies a default value for the column. You can specify a constant value, or use a link:sql-reference/ddl#system-functions[system function] to generate a value.
-
-'''
-
-== System Functions
-
-=== gen_random_uuid
-
-This function generates a random UUID value each time it is called.
-
-Example:
-
-[source,sql]
-----
-CREATE TABLE t (id varchar default gen_random_uuid primary key, val int)
-----
-
-Referenced by:
-
-* link:sql-reference/ddl#create-table[CREATE TABLE]
+* `cache_name` - name of the cache. Can be schema-qualified.
+* `IF NOT EXISTS` - create the cache only if a cache with the same name does not exist.
+* `COLOCATE BY` - colocation key. The key can be composite. Primary key must include colocation key. Was `affinity_key` in Ignite 2.x.
+* `WITH` - accepts additional parameters; currently, accepts only:
+** `PRIMARY_ZONE` - sets the link:sql-reference/distribution-zones[Distribution Zone]. The selected distribution zone must use `aimem` storage engine.
+* `EXPIRE AT` - allows specifying a column with a point in time when a record should be deleted.
+* `expiry_column_name` - name of the column that contains values on which the record expiry is based.
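+
+Example:
+
+A minimal sketch; the cache name, columns, and the `MEMORYZONE` distribution zone (which must use the `aimem` storage engine) are illustrative assumptions:
+
+[source,sql]
+----
+-- hypothetical cache and zone names, shown for illustration only
+CREATE CACHE IF NOT EXISTS Accounts (
+  id int PRIMARY KEY,
+  balance double
+) WITH PRIMARY_ZONE='MEMORYZONE'
+----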
diff --git a/docs/_docs/sql-reference/distribution-zones.adoc b/docs/_docs/sql-reference/distribution-zones.adoc
index b7f4bca..6c025b8 100644
--- a/docs/_docs/sql-reference/distribution-zones.adoc
+++ b/docs/_docs/sql-reference/distribution-zones.adoc
@@ -14,38 +14,51 @@
 // limitations under the License.
 = Distribution Zones
 
-This section describes Apache Ignite distribution zones. In Ignite 3, you can fine tune distribution of your partitions on nodes for better performance and stability.
+This section describes Ignite 3 distribution zones. In Ignite 3, you can fine-tune distribution of your partitions on nodes for better performance and stability.
 
 == CREATE ZONE
 
 Creates a new distribution zone.
 
-[source,sql]
-----
-CREATE ZONE [IF NOT EXISTS] qualified_zone_name [ENGINE engine_name]
-[WITH
-    [PARTITIONS = partitionNumber],
-    [REPLICAS = replicaNumber],
-    {[DATA_NODES_AUTO_ADJUST_SCALE_UP = scale_up_value |
-    DATA_NODES_AUTO_ADJUST_SCALE_DOWN = scale_down_value |
-    (DATA_NODES_AUTO_ADJUST_SCALE_UP = scale_up_value & DATA_NODES_AUTO_ADJUST_SCALE_DOWN = scale_down_value)]},
-    [DATA_NODES_FILTER = jsonPathFilter]
-]
-[;]
-----
+NOTE: This can also be done via the link:developers-guide/java-to-tables[Java API].
 
-Parameters:
+[.diagram-container]
+Diagram(
+Terminal('CREATE ZONE'),
+Optional(Terminal('IF NOT EXISTS')),
+NonTerminal('qualified_zone_name'),
+Optional(Sequence(Terminal('ENGINE'),
+NonTerminal('engine_name'))),
+End({type:'complex'})
+)
+
+[.diagram-container]
+Diagram(
+Start({type:'complex'}),
+Optional(Sequence(
+Terminal('WITH'),
+Optional('('),
+OneOrMore(
+NonTerminal('parameter', {href:'./grammar-reference/#parameter'}),
+','),
+Optional(')'))))
 
 
-* `qualified_zone_name` - name of the distribution zone. Can be schema-qualified.
+Keywords and parameters:
+
 * `IF NOT EXISTS` - create a zone only if a different zone with the same name does not exist.
-* `ENGINE` - selects the storage engine to use. Currently `aipersist`, `aimem` and `rocksdb` are available.
+* `qualified_zone_name` - a name of the distribution zone.
+* `ENGINE` - selects the storage engine (`engine_name`) to use. Currently available are:
+** `aipersist`
+** `aimem`
+** `rocksdb`
 * `WITH` - accepts the following additional parameters:
-- `PARTITIONS` - the number of parts data is divinded into. Partitions are then split between nodes for storage.
-- `REPLICAS` - the number of copies of each partition.
-- `DATA_NODES_AUTO_ADJUST_SCALE_UP` - the delay in seconds between the new node joining and the start of data zone adjustment.
-- `DATA_NODES_AUTO_ADJUST_SCALE_DOWN` - the delay in seconds between the node leaving the cluster and the start of data zone adjustment.
-- `DATA_NODES_FILTER` - specifies the nodes that can be used to store data in the distribution zone based on node attributes. You can configure node attributes by using cli.  Filter uses JSONPath rules. If the attribute is not found, all negative comparisons will be valid. For example, `$[?(@.storage != 'SSD']}` will also include nodes without the `storage` attribute specified.
+** `PARTITIONS` - the number of partitions the data is divided into. Partitions are then split between nodes for storage.
+** `REPLICAS` - the number of copies of each partition.
+** `DATA_NODES_FILTER` - specifies the nodes that can be used to store data in the distribution zone based on node attributes. You can configure node attributes by using the CLI. The filter uses JSONPath rules. If the attribute is not found, all negative comparisons will be valid. For example, `$[?(@.storage != 'SSD')]` will also include nodes without the `storage` attribute specified.
+** `DATA_NODES_AUTO_ADJUST_SCALE_UP` - the delay in seconds between the new node joining and the start of data zone adjustment.
+** `DATA_NODES_AUTO_ADJUST_SCALE_DOWN` - the delay in seconds between the node leaving the cluster and the start of data zone adjustment.
+** `DATA_STORAGE_ENGINE` - the name of the data storage engine.
 
 Examples:
 
@@ -56,55 +69,77 @@
 CREATE ZONE IF NOT EXISTS exampleZone
 ----
 
-
 Creates an `exampleZone` distribution zone that will only use nodes with SSD attribute and adjust 300 seconds after cluster topology changes:
 
 [source,sql]
 ----
-CREATE ZONE IF NOT EXISTS exampleZone WITH DATA_NODES_FILTER="$[?(@.storage == 'SSD')]", DATA_NODES_AUTO_ADJUST=300
+CREATE ZONE IF NOT EXISTS exampleZone WITH DATA_NODES_FILTER=SSD, DATA_NODES_AUTO_ADJUST_SCALE_UP=300
 ----
 
 == ALTER ZONE
 
-Renames a distribution zone.
+Modifies a distribution zone.
 
-[source,sql]
-----
-ALTER ZONE IF EXISTS { 'qualified_zone_name' } [RENAME TO {new_qualified_zone_name}]
-[WITH
-    [PARTITIONS = partitionNumber],
-    [REPLICAS = replicaNumber],
-    {[DATA_NODES_AUTO_ADJUST_SCALE_UP = scale_up_value |
-    DATA_NODES_AUTO_ADJUST_SCALE_DOWN = scale_down_value |
-    (DATA_NODES_AUTO_ADJUST_SCALE_UP = scale_up_value & DATA_NODES_AUTO_ADJUST_SCALE_DOWN = scale_down_value)]},
-    [DATA_NODES_FILTER = jsonPathFilter]
-]
-[;]
-----
+=== ALTER ZONE RENAME TO new_qualified_zone_name
 
-Parameters:
+[.diagram-container]
+Diagram(
+Terminal('ALTER ZONE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_zone_name'),
+Terminal('RENAME TO'),
+NonTerminal('new_qualified_zone_name'),
+)
 
-* `qualified_zone_name` - name of the distribution zone. Can be schema-qualified.
+Keywords and parameters:
+
 * `IF EXISTS` - do not throw an error if a zone with the specified name does not exist.
-* `WITH` - accepts the following additional parameters:
-- `PARTITIONS` - the number of parts data is divinded into. Partitions are then split between nodes for storage.
-- `REPLICAS` - the number of copies of each partition.
-- `DATA_NODES_AUTO_ADJUST_SCALE_UP` - the delay in seconds between the new node joining and the start of data zone adjustment.
-- `DATA_NODES_AUTO_ADJUST_SCALE_DOWN` - the delay in seconds between the node leaving the cluster and the start of data zone adjustment.
-- `DATA_NODES_FILTER` - specifies the nodes that can be used to store data in the distribution zone based on node attributes.
+* `qualified_zone_name` - the current name of the distribution zone.
+* `RENAME TO` - renames the selected zone to the new name.
+* `new_qualified_zone_name` - the new name of the distribution zone (assigned by `RENAME`).
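+
+Example:
+
+A minimal sketch that renames the `exampleZone` distribution zone (from the CREATE ZONE examples) to a hypothetical `renamedZone`:
+
+[source,sql]
+----
+ALTER ZONE IF EXISTS exampleZone RENAME TO renamedZone
+----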
+
+=== ALTER ZONE SET
+
+[.diagram-container]
+Diagram(
+Terminal('ALTER ZONE'),
+Optional(Terminal('IF EXISTS')),
+NonTerminal('qualified_zone_name'),
+Sequence(Terminal('SET'),
+Optional('('),
+OneOrMore(
+NonTerminal('parameter', {href:'./grammar-reference/#parameter'}),
+','),
+Optional(')')))
+
+Keywords and parameters:
+
+* `IF EXISTS` - do not throw an error if a zone with the specified name does not exist.
+* `qualified_zone_name` - a name of the distribution zone.
+* `SET` - assigns values to any or all of the following parameters:
+** `PARTITIONS` - the number of partitions the data is divided into.
+** `REPLICAS` - the number of copies of each partition.
+** `DATA_NODES_FILTER` - specifies the nodes that can be used to store data in the distribution zone based on node attributes.
+** `DATA_NODES_AUTO_ADJUST_SCALE_UP` - the delay in seconds between the new node joining and the start of data zone adjustment.
+** `DATA_NODES_AUTO_ADJUST_SCALE_DOWN` - the delay in seconds between the node leaving the cluster and the start of data zone adjustment.
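+
+Example:
+
+A minimal sketch based on the diagram above; it changes the number of replicas for the `exampleZone` distribution zone (the value is illustrative):
+
+[source,sql]
+----
+ALTER ZONE IF EXISTS exampleZone SET REPLICAS=3
+----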
 
 == DROP ZONE
 
-The `DROP ZONE` command drops an existing distribution zone.
+Drops an existing distribution zone.
 
-----
-DROP ZONE IF EXISTS qualified_zone_name
-----
+NOTE: This can also be done via the link:developers-guide/java-to-tables[Java API].
 
-Parameters:
+[.diagram-container]
+Diagram(
+Terminal('DROP ZONE'),
+Terminal('IF EXISTS'),
+NonTerminal('qualified_zone_name')
+)
 
-- `IF EXISTS` - do not throw an error if a zone with the specified name does not exist.
-- `qualified_zone_name` - the name of the distribution zone. Can be schema-qualified.
+Keywords and parameters:
+
+* `IF EXISTS` - do not throw an error if a zone with the specified name does not exist.
+* `qualified_zone_name` - the name of the distribution zone.
 
 
 Examples:
@@ -114,4 +149,4 @@
 [source,sql]
 ----
 DROP ZONE IF EXISTS exampleZone
-----
+----
\ No newline at end of file
diff --git a/docs/_docs/sql-reference/dml.adoc b/docs/_docs/sql-reference/dml.adoc
index 0335694..28c20cb 100644
--- a/docs/_docs/sql-reference/dml.adoc
+++ b/docs/_docs/sql-reference/dml.adoc
@@ -14,7 +14,7 @@
 // limitations under the License.
 = Data Manipulation Language (DML)
 
-This section walks you through all data manipulation language (DML) commands supported by Apache Ignite 3.0.
+This section walks you through all data manipulation language (DML) commands supported by Ignite 3.
 
 WARNING: Currently, `WITH` and `MERGE` commands are not supported.
 
diff --git a/docs/_docs/sql-reference/grammar-reference.adoc b/docs/_docs/sql-reference/grammar-reference.adoc
new file mode 100644
index 0000000..44a9acf
--- /dev/null
+++ b/docs/_docs/sql-reference/grammar-reference.adoc
@@ -0,0 +1,243 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Grammar Reference
+
+This section describes grammar elements that are common to multiple SQL functions (link:sql-reference/ddl[DDL], link:sql-reference/distribution-zones[Distribution Zones], etc.).
+
+== column_definition_or_list
+
+[.diagram-container]
+Diagram(
+Sequence(
+Optional('('),
+Choice(0,Sequence(
+NonTerminal('column_name'),
+NonTerminal('data_type')),
+NonTerminal(',')
+),
+Optional(')')
+),
+)
+
+Keywords and parameters:
+
+* `column_name` - a column name.
+* `data_type` - a valid link:sql-reference/data-types[data type].
+
+
+Referenced by:
+
+* link:sql-reference/ddl#alter-table[ALTER TABLE]
+
+'''
+
+== column_list
+
+[.diagram-container]
+Diagram(Sequence(
+Optional('('),
+OneOrMore(NonTerminal('column_name'), ','),
+Optional(')')),
+)
+
+Keywords and parameters:
+
+* `column_name` - a column name.
+
+
+Referenced by:
+
+* link:sql-reference/ddl#create-table[CREATE TABLE]
+* link:sql-reference/ddl#alter-table[ALTER TABLE]
+* link:sql-reference/ddl#create-index[CREATE INDEX]
+* <<constraint>>
+
+
+'''
+
+== sorted_column_list
+
+[.diagram-container]
+Diagram(
+Sequence('(', OneOrMore(Sequence(NonTerminal('column_name'), Optional(Choice(0, Terminal('ASC'), Terminal('DESC')))), ','), ')')
+)
+
+Keywords and parameters:
+
+* `column_name` - a column name.
+
+
+Referenced by:
+
+* link:sql-reference/ddl#create-index[CREATE INDEX]
+* <<constraint>>
+
+'''
+
+== constraint
+
+[.diagram-container]
+Diagram(Sequence(
+Optional(Sequence(Terminal('CONSTRAINT'),NonTerminal('constraint_name')
+)),
+Terminal('PRIMARY KEY'),
+Optional(
+Choice(0,
+Sequence(
+Terminal('USING'),
+Choice (0,
+Sequence(Terminal('SORTED'), NonTerminal('sorted_column_list', {href:'./grammar-reference/#sorted_column_list'})
+),
+Sequence('HASH', NonTerminal('column_list', {href:'./grammar-reference/#column_list'})))
+))
+)))
+
+Keywords and parameters:
+
+* `constraint_name` - a name of the constraint.
+
+Referenced by:
+
+* link:sql-reference/ddl#create-table[CREATE TABLE]
+
+'''
+
+== qualified_table_name
+
+[.diagram-container]
+Diagram(Sequence(
+Optional(Sequence(NonTerminal('schema'),NonTerminal('.')
+),),
+NonTerminal('table_name')
+),
+)
+
+Keywords and parameters:
+
+* `schema` - a name of the table schema.
+* `table_name` - a name of the table.
+
+Referenced by:
+
+* link:sql-reference/ddl#create-table[CREATE TABLE]
+* link:sql-reference/ddl#alter-table[ALTER TABLE]
+* link:sql-reference/ddl#drop-table[DROP TABLE]
+* link:sql-reference/ddl#create-index[CREATE INDEX]
+
+'''
+
+== column_definition
+
+[.diagram-container]
+Diagram(
+Sequence(
+NonTerminal('column_name'),
+NonTerminal('DATA TYPE', {href:'./data-types'}),
+Optional(Sequence(Optional('NOT'),Terminal('NULL')))
+),
+End({type:'complex'})
+)
+
+[.diagram-container]
+Diagram(
+Start({type:'complex'}),
+Sequence(
+Optional(Sequence(Terminal('DEFAULT'), Choice(1,NonTerminal('identifier'),
+NonTerminal('literal_value'),
+Sequence(Terminal('CURRENT TIMESTAMP'), Optional(Sequence(Terminal('+'),Terminal('INTERVAL'), NonTerminal('interval')))
+)
+),)),
+Optional(Terminal('PRIMARY KEY')),
+))
+
+Keywords and parameters:
+
+* `column_name` - a column name.
+* `DATA TYPE` - the link:sql-reference/data-types[data type] allowed in the column.
+* `identifier` - a random identifier for the row. Can be generated by using the <<gen_random_uuid>> function.
+* `literal_value` - a value to be assigned as the default.
+* `CURRENT TIMESTAMP` - a function that returns the current time. Can only be used for `TIMESTAMP` columns.
+* `interval` - the time interval by which the timestamp should be offset.
+
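+For example, the following column definitions (taken from the CREATE TABLE examples in the DDL section) conform to this grammar:
+
+[source,sql]
+----
+id int PRIMARY KEY
+city_id int DEFAULT 1
+duration timestamp DEFAULT CURRENT_TIMESTAMP + INTERVAL '1' WEEK
+----
+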
+Referenced by:
+
+* link:sql-reference/ddl#сreate-table[CREATE TABLE]
+* link:sql-reference/ddl#alter-table[ALTER TABLE]
+* link:sql-reference/ddl#create-index[CREATE INDEX]
+* link:sql-reference/distribution-zones#create-zone[CREATE ZONE]
+
+'''
+
+== parameter
+
+[.diagram-container]
+Diagram(
+  NonTerminal('parameter_name'),
+  Terminal('='),
+  NonTerminal('parameter_value'))
+
+Parameters:
+
+* `parameter_name` - the name of the parameter.
+* `parameter_value` - the value of the parameter.
+
+When a parameter is specified, you can provide it as a literal value or as an identifier. For example:
+
+----
+CREATE ZONE test_zone;
+CREATE TABLE test_table (id INT PRIMARY KEY, val INT) WITH PRIMARY_ZONE=test_zone;
+----
+
+In this case, `test_zone` is created as an identifier and referenced as an identifier. When used like this, the parameter value is not case-sensitive.
+
+----
+CREATE ZONE "test_zone";
+CREATE TABLE test_table (id INT PRIMARY KEY, val INT) WITH PRIMARY_ZONE='test_zone';
+----
+
+In this case, `test_zone` is created as a literal value, and is used as a literal. When used like this, the parameter is case-sensitive.
+
+----
+CREATE ZONE test_zone;
+CREATE TABLE test_table (id INT PRIMARY KEY, val INT) WITH PRIMARY_ZONE=`TEST_ZONE`;
+----
+
+In this case, `test_zone` is created as an identifier, and is case-insensitive. As such, when `TEST_ZONE` is used as a literal, it still matches the identifier.
+
+
+Referenced by:
+
+* link:sql-reference/ddl#сreate-table[CREATE TABLE]
+* link:sql-reference/distribution-zones#create-zone[CREATE ZONE]
+* link:sql-reference/distribution-zones#alter-zone[ALTER ZONE]
+
+'''
+
+== System Functions
+
+=== gen_random_uuid
+
+This function generates a random UUID value each time it is called.
+
+Example:
+
+[source,sql]
+----
+CREATE TABLE t (id varchar default gen_random_uuid primary key, val int)
+----
+
+Referenced by:
+
+* link:sql-reference/ddl#create-table[CREATE TABLE]
diff --git a/docs/_docs/sql-reference/operators-and-functions.adoc b/docs/_docs/sql-reference/operators-and-functions.adoc
index 8ca3dca..62cdb49 100644
--- a/docs/_docs/sql-reference/operators-and-functions.adoc
+++ b/docs/_docs/sql-reference/operators-and-functions.adoc
@@ -16,15 +16,6 @@
 
 == Aggregate Functions
 
-=== COUNT
-
-[source,sql]
-----
-COUNT( [ ALL | DISTINCT ] value [, value ]*)
-----
-
-Returns the number of input rows for which value is not null (wholly not null if value is composite).
-
 === AVG
 
 [source,sql]
@@ -34,23 +25,14 @@
 
 Returns the average (arithmetic mean) of numeric across all input values.
 
-=== SUM
+=== COUNT
 
 [source,sql]
 ----
-SUM( [ ALL | DISTINCT ] numeric)
+COUNT( [ ALL | DISTINCT ] value [, value ]*)
 ----
 
-Returns the sum of numeric across all input values.
-
-=== MIN
-
-[source,sql]
-----
-MIN( [ ALL | DISTINCT ] value)
-----
-
-Returns the minimum value across all input values.
+Returns the number of input rows for which value is not null (wholly not null if value is composite).
 
 === MAX
 
@@ -61,111 +43,112 @@
 
 Returns the maximum value across all input values.
 
-//=== STRING_AGG
+=== MIN
 
-//[source,sql]
-//----
-//STRING_AGG( value [, separator ] [ ORDER BY ...])
-//----
+[source,sql]
+----
+MIN( [ ALL | DISTINCT ] value)
+----
 
-//Concatenates the values of string expressions and places separator values between them.
+Returns the minimum value across all input values.
 
-//=== STRING_CONCAT
+=== SUM
 
-//[source,sql]
-//----
-//STRING_CONCAT(string1, string2, ... stringN)
-//----
+[source,sql]
+----
+SUM( [ ALL | DISTINCT ] numeric)
+----
 
-//Concatenates the text values in the specified data ranges.
+Returns the sum of numeric across all input values.
 
 
 == Functions
 
+=== GENERAL
+
+* CAST
+* COALESCE
+* GREATEST
+* NULLIF
+* NVL
+
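+For example, `COALESCE` and `NULLIF` can be combined in a simple query; this sketch reuses the `Person` table from the DDL examples:
+
+[source,sql]
+----
+SELECT COALESCE(company, 'Unknown'), NULLIF(age, 0) FROM Person;
+----
+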
+For more information on functions supported by Apache Calcite, see the link:https://calcite.apache.org/docs/reference.html#operators-and-functions[product documentation,window=_blank].
 
 === JSON
 
-* JSON_EXISTS
-* JSON_VALUE
-* JSON_QUERY
-* JSON_OBJECT
-* JSON_ARRAY
-* JSON_PRETTY
-* STRING
+* ASCII
 * CHAR_LENGTH
 * CHARACTER_LENGTH
-* UPPER
-* LOWER
-* POSITION
-* TRIM
-* OVERLAY
-* SUBSTRING
-* INITCAP
-* SPACE
-* STRCMP
-* REVERSE
-* REGEXP_REPLACE
-* SHA1
-* MD5
-* LTRIM
-* TO_BASE64
-* FROM_BASE64
 * COMPRESS
 * CONCAT
-* TRANSLATE
-* ASCII
+* FROM_BASE64
+* INITCAP
+* JSON_ARRAY
+* JSON_EXISTS
+* JSON_OBJECT
+* JSON_PRETTY
+* JSON_QUERY
+* JSON_VALUE
 * LEFT
-* RIGHT
+* LOWER
+* LTRIM
+* MD5
+* OVERLAY
+* POSITION
+* REGEXP_REPLACE
 * REPEAT
+* REVERSE
+* RIGHT
+* SHA1
 * SOUNDEX
-* For more information on functions supported by Apache Calcite, see the link:https://calcite.apache.org/docs/reference.html#operators-and-functions[product documentation,window=_blank].
+* SPACE
+* STRCMP
+* STRING
+* SUBSTRING
+* TO_BASE64
+* TRANSLATE
+* TRIM
+* UPPER
+
+For more information on functions supported by Apache Calcite, see the link:https://calcite.apache.org/docs/reference.html#operators-and-functions[product documentation,window=_blank].
 
 === NUMERIC
 
-* POWER
 * ABS
-* MOD
-* SQRT
-* LN
-* LOG10
-* EXP
-* CEIL
-* FLOOR
-* RAND
 * ACOS
 * ASIN
 * ATAN
 * ATAN2
 * CBRT
+* CEIL
+* CHR
 * COS
+* COSH
 * COT
 * DEGREES
+* EXP
+* FLOOR
+* LN
+* LOG10
+* MOD
 * PI()
+* POWER
 * RADIANS
+* RAND
 * ROUND
 * SIGN
 * SIN
-* TAN
-* TRUNCATE
-* CHR
-* COSH
 * SINH
+* SQRT
+* TAN
 * TANH
-* For more information on functions supported by Apache Calcite, see the link:https://calcite.apache.org/docs/reference.html#operators-and-functions[product documentation,window=_blank].
+* TRUNCATE
 
-=== GENERAL
-
-* NULLIF
-* COALESCE
-* CAST
-* NVL
-* GREATEST
-* For more information on functions supported by Apache Calcite, see the link:https://calcite.apache.org/docs/reference.html#operators-and-functions[product documentation,window=_blank].
-
+For more information on functions supported by Apache Calcite, see the link:https://calcite.apache.org/docs/reference.html#operators-and-functions[product documentation,window=_blank].
 
 === TIMESTAMP
 
-* TIMESTAMP_ADD
-* TIMESTAMP_DIFF
 * EXTRACT
-* LAST_DAY
\ No newline at end of file
+* LAST_DAY
+* TIMESTAMP_ADD
+* TIMESTAMP_DIFF
\ No newline at end of file
diff --git a/docs/_docs/sql-reference/transactions.adoc b/docs/_docs/sql-reference/transactions.adoc
new file mode 100644
index 0000000..eb2e5af
--- /dev/null
+++ b/docs/_docs/sql-reference/transactions.adoc
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= Transactions
+
+A transaction is a sequence of SQL operations that starts with the `START TRANSACTION` statement and ends with the `COMMIT` statement. Either the effects of all operations are published, or no results are published at all.
+
+NOTE: Transactions are only allowed within a link:developers-guide/clients/overview#sql-scripts[script].
+
+In Ignite 3, you start the transaction by using the `START TRANSACTION` statement:
+
+[.diagram-container]
+Diagram(
+Terminal('START TRANSACTION'),
+ZeroOrMore(
+Terminal('READ ONLY'),
+Terminal('READ WRITE')))
+
+NOTE: DDL statements are not supported inside transactions.
+
+Parameters:
+
+- `READ WRITE` - both read and write operations are allowed in the transaction.
+- `READ ONLY` - only read operations are allowed in the transaction.
+
+
+You close and commit the transaction by using the `COMMIT` statement:
+
+[.diagram-container]
+Diagram(
+Terminal('COMMIT'))
+
+
+== Example
+
+The example below inserts three rows into the table in a single transaction, ensuring they will all be committed together:
+
+[source,sql]
+----
+START TRANSACTION READ WRITE;
+
+INSERT INTO Person VALUES (1, 'John', 'Smith');
+INSERT INTO Person VALUES (2, 'Jane', 'Smith');
+INSERT INTO Person VALUES (3, 'Adam', 'Mason');
+
+COMMIT;
+----
diff --git a/docs/_docs/sql-tuning.adoc b/docs/_docs/sql-tuning.adoc
new file mode 100644
index 0000000..9d2f90b
--- /dev/null
+++ b/docs/_docs/sql-tuning.adoc
@@ -0,0 +1,88 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+= SQL Performance Tuning
+
+== Optimizer Hints
+
+The query optimizer tries to choose the fastest execution plan. However, you may know more about your data design, application design, or data distribution in your cluster than the optimizer does. SQL hints can help the optimizer make better optimization decisions or build an execution plan faster.
+
+[NOTE]
+====
+SQL hints are optional to apply and might be skipped in some cases.
+====
+
+=== Hints format
+
+SQL hints are defined by a special comment +++/*+ HINT */+++, referred to as a _hint block_. Spaces before and after the
+hint name are required. The hint block must be placed right after the operator. Several hints for one relation operator are not supported.
+
+Example:
+
+[source, SQL]
+----
+SELECT /*+ NO_INDEX */ T1.* FROM TBL1 where T1.V1=? and T1.V2=?
+----
+
+==== Hint parameters
+
+Hint parameters, if required, are placed in brackets after the hint name and separated by commas.
+
+A hint parameter can be quoted. Quoted parameters are case-sensitive. Quoted and unquoted parameters cannot be
+defined for the same hint.
+
+Example:
+[source, SQL]
+----
+SELECT /*+ FORCE_INDEX(TBL1_IDX2,TBL2_IDX1) */ T1.V1, T2.V1 FROM TBL1 T1, TBL2 T2 WHERE T1.V1 = T2.V1 AND T1.V2 > ? AND T2.V2 > ?;
+
+SELECT /*+ FORCE_INDEX('TBL2_idx1') */ T1.V1, T2.V1 FROM TBL1 T1, TBL2 T2 WHERE T1.V1 = T2.V1 AND T1.V2 > ? AND T2.V2 > ?;
+----
+
+=== Hints errors
+
+The optimizer tries to apply every hint and its parameters, if possible. But it skips the hint or hint parameter if:
+
+* The hint is not supported.
+* Required hint parameters are not passed.
+* The hint parameters have been passed, but the hint does not support any parameter.
+* The hint parameter is incorrect or refers to a nonexistent object, such as a nonexistent index or table.
+* The current hints or current parameters are incompatible with the previous ones, such as forcing the use and disabling of the same index.
+
+=== Supported hints
+
+==== FORCE_INDEX / NO_INDEX
+
+Forces or disables index scan.
+
+===== Parameters:
+
+* Empty - forces an index scan for every underlying table (the optimizer chooses any available index), or disables all indexes.
+* A single index name - uses or skips exactly this index.
+* Several index names - they can relate to different tables; the optimizer chooses indexes for scanning or skips them all.
+
+===== Examples:
+
+[source, SQL]
+----
+SELECT /*+ FORCE_INDEX */ T1.* FROM TBL1 T1 WHERE T1.V1 = T2.V1 AND T1.V2 > ?;
+
+SELECT /*+ FORCE_INDEX(TBL1_IDX2, TBL2_IDX1) */ T1.V1, T2.V1 FROM TBL1 T1, TBL2 T2 WHERE T1.V1 = T2.V1 AND T1.V2 > ? AND T2.V2 > ?;
+
+SELECT /*+ NO_INDEX */ T1.* FROM TBL1 T1 WHERE T1.V1 = T2.V1 AND T1.V2 > ?;
+
+SELECT /*+ NO_INDEX(TBL1_IDX2, TBL2_IDX1) */ T1.V1, T2.V1 FROM TBL1 T1, TBL2 T2 WHERE T1.V1 = T2.V1 AND T1.V2 > ? AND T2.V2 > ?;
+----
+
+NOTE: The query cannot have both `FORCE_INDEX` and `NO_INDEX` hints at the same time.
\ No newline at end of file
diff --git a/docs/_docs/sql/java.adoc b/docs/_docs/sql/java.adoc
deleted file mode 100644
index d609e1d..0000000
--- a/docs/_docs/sql/java.adoc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-= Java SQL API
-
-In your Java projects, you can use the Java SQL API to execute SQL statements and getting results. All operations are executed as a part of sql session. You can create a session with default parameters by using a `sql.createSession()` method, or use an `sql.sessionBuilder` to configure it for your environment. Sessions are usually long-lived objects that can be used in multiple threads and may hold data server-side. Session object is light-weight, and Ignite manages resources automatically in failover scenarios (for example, for disconnects). You have to close the session manually by using the `close()` method when the session is no longer needed to release server resources. Here is how you usually set up a session:
-
-[source, java]
-----
-Session ses = sql.createSession();
-Session ses = sql.sessionBuilder().defaultSchema("PUBLIC").build();
-----
-
-The `execute` method accepts a query String, or a Statement object, that can be created by using `sql.createStatement()` and `sql.statementBuilder()` methods. Statements are light-weight objects and can be reused in multiple threads or sessions. Setting set by using `statementBuilder` override session settings for this statement. Here is how you usually set up a statement:
-
-[source, java]
-----
-Statement stmt = sql.createStatement(sqlQueryString));
-Statement stmt = sql.statementBuilder().query(sqlQueryString)).build();
-----
-
-== Creating Tables
-
-Here is an example of how you can create a new table on a cluster:
-
-[source, java]
-----
-try (ResultSet rs = ses.execute(null,
-        "CREATE TABLE SYNCH(ID INT PRIMARY KEY, VAL0 INT)")
-) {
-    // no-op
-}
-----
-
-NOTE: ResultSet is closable, but it is safe to skip `close()` method for DDL and DML queries, as they do not keep server cursor open.
-
-
-== Filling Tables
-
-With Apache Ignite 3, you can fill the table by adding rows one by one, or in a batch. In both cases, you create an `INSERT` statement, and then exeсute it:
-
-[source, java]
-----
-long rowsAdded = Arrays.stream(ses.executeBatch(null,
-    "INSERT INTO ACCOUNTS (ACCOUNT_ID, CITY_ID, FIRST_NAME, LAST_NAME, BALANCE) values (?, ?, ?, ?, ?)",
-    BatchedArguments.of(1, 1, "John", "Doe", 1000.0d)
-        .add(2, 1, "Jane", "Roe", 2000.0d)
-        .add(3, 1, "Mary", "Major", 1500.0d)
-        .add(4, 1, "Richard", "Miles", 1450.0d)))
-        .asLongStream().sum();
-----
-
-== Getting Data From Tables
-
-To get data from a table, execute the `SELECT` statement to get a set of results. SqlRow can provide access to column values by column name or column index. You can then iterate through results to get data:
-
-[source, java]
-----
-try (ResultSet rs = ses.execute(null,
-        "SELECT a.FIRST_NAME, a.LAST_NAME FROM ACCOUNTS a")) {
-    while (rs.hasNext()) {
-        SqlRow row = rs.next();
-
-        result += row.stringValue(1) + ", " + row.stringValue("LAST_NAME") + "\n";
-    }
-}
-----
-
-NOTE: ResultSet may hold server-side cursor open due to lazy query execution. It must be closed manually, or by using the `try-with-resources` statement.
diff --git a/docs/_docs/sql/odbc/index.adoc b/docs/_docs/sql/odbc/index.adoc
deleted file mode 100644
index cafbc72..0000000
--- a/docs/_docs/sql/odbc/index.adoc
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: toc
----
-
-= ODBC Driver
-
diff --git a/docs/_docs/sql/odbc/odbc-driver.adoc b/docs/_docs/sql/odbc/odbc-driver.adoc
deleted file mode 100644
index f4aa81d..0000000
--- a/docs/_docs/sql/odbc/odbc-driver.adoc
+++ /dev/null
@@ -1,96 +0,0 @@
-= ODBC Driver
-
-== Overview
-
-Apache Ignite 3 includes an ODBC driver that allows you both to select and to modify data stored in a distributed cache by using standard SQL queries and native ODBC API. ODBC driver uses your link:clients/overview[client connection configuration].
-
-ODBC driver only provides thread-safety at the connections level. This means that you should not access the same connection from multiple threads without additional synchronization, though you can create separate connections for every thread and use them simultaneously.
-
-The ODBC driver implements version 3.8 of the ODBC API. For detailed information on ODBC please refer to link:https://msdn.microsoft.com/en-us/library/ms714177.aspx[ODBC Programmer's Reference].
-
-== Installing ODBC Driver
-
-To use ODBC driver, register it in your system so that your ODBC Driver Manager will be able to locate it.
-
-=== Installing on Windows
-
-NOTE: Microsoft Visual C++ 2017 Redistributable Package should be installed first.
-
-Launch the provided installer and follow the instructions.
-
-
-=== Installing on Linux
-
-To build and install ODBC driver on Linux, you need to first install
-ODBC Driver Manager. The ODBC driver has been tested with link:http://www.unixodbc.org[UnixODBC].
-
-==== Download from website
-
-You can get the built rpm or deb package from the provided website. Then, install the package locally to use it.
-
-== Supported Data Types
-
-The following SQL data types are supported:
-
-- `SQL_CHAR`
-- `SQL_VARCHAR`
-- `SQL_LONGVARCHAR`
-- `SQL_SMALLINT`
-- `SQL_INTEGER`
-- `SQL_FLOAT`
-- `SQL_DOUBLE`
-- `SQL_BIT`
-- `SQL_TINYINT`
-- `SQL_BIGINT`
-- `SQL_BINARY`
-- `SQL_VARBINARY`
-- `SQL_LONGVARBINARY`
-- `SQL_GUID`
-- `SQL_DECIMAL`
-- `SQL_TYPE_DATE`
-- `SQL_TYPE_TIMESTAMP`
-- `SQL_TYPE_TIME`
-
-== Using pyodbc
-
-Ignite can be used with link:https://pypi.org/project/pyodbc/[pyodbc]. Here is how you can use pyodbc in Apache Ignite 3:
-
-- Install pyodbc
-+
-[source,shell]
-----
-pip3 install pyodbc
-----
-+
-- Import pyodbc to your project:
-+
-[source,python]
-----
-import pyodbc
-----
-+
-- Connect to the database:
-+
-[source,python]
-----
-conn = pyodbc.connect('Driver={Apache Ignite 3};Address=127.0.0.1:10800;')
-----
-+
-- Set encoding to UTF-8:
-+
-[source,python]
-----
-conn.setencoding(encoding='utf-8')
-conn.setdecoding(sqltype=pyodbc.SQL_CHAR, encoding="utf-8")
-conn.setdecoding(sqltype=pyodbc.SQL_WCHAR, encoding="utf-8")
-----
-+
-- Get data from your database:
-+
-[source,python]
-----
-cursor = conn.cursor()
-cursor.execute('SELECT * FROM table_name')
-----
-
-For more information on using pyodbc, use the link:https://github.com/mkleehammer/pyodbc/wiki[official documentation].
\ No newline at end of file
diff --git a/docs/_docs/ssl-tls.adoc b/docs/_docs/ssl-tls.adoc
deleted file mode 100644
index 2cbf1aa..0000000
--- a/docs/_docs/ssl-tls.adoc
+++ /dev/null
@@ -1,191 +0,0 @@
-= SSL/TLS
-
-This page explains how to configure SSL/TLS encryption between the cluster nodes (server and client) and the clients that connect to your cluster.
-
-== Considerations
-
-All internal connections in the cluster, as well as the cluster's user-facing interfaces, support SSL. The communication categories are as follows:
-
-* Between the user and the cluster (node): REST
-* Between the user and the platform clients
-* Between nodes: Network (Messaging, Scalecube)
-
-All SSL configuration is performed at the node level.
-
-Apache Ignite does not support direct paths to SSL certificates. Instead, it uses PKCS12 and JKS keystores.
-
-== REST
-
-The standard implementation of SSL for REST involves configuring a secure connection on a separate port. Apache Ignite supports HTTP and HTTPS, each on its own port.
-
-The Apache Ignite 3.x REST security configuration is as follows:
-
-[source,json]
-----
-"rest": {
-    "dualProtocol": false,
-    "httpToHttpsRedirection": false,
-    "ssl": {
-        "enabled": true,
-        "port": 10400,
-        "portRange": 100,
-        "keyStore": {
-            "type": "PKCS12",
-            "path": "must not be empty",
-            "password": "may be empty"
-        }
-    }
-}
-----
-
-== Clients and JDBC
-
-The Apache Ignite 3.x client implementation is based on the Netty framework, which supports configuring secure connections via `SSLContextBuilder`.
-
-=== Server-side Configuration
-
-The default way to configure SSL on the server side is to update the configuration with SSL properties:
-
-[source,json]
-----
-"clientConnector": {
-    "ssl": {
-        "enabled": true,
-        "clientAuth": "require",
-        "keyStore": {
-            "type": "PKCS12",
-            "path": "must not be empty",
-            "password": "may be empty"
-        },
-        "trustStore": {
-            "type": "PKCS12",
-            "path": "must not be empty",
-            "password": "may be empty"
-        }
-    }
-}
-----
-
-If you have enabled SSL for `clientConnector`, and want to use JDBC, set the corresponding properties in your code:
-
-[source,java]
-----
-var url =
-    "jdbc:ignite:thin://{address}:{port}"
-        + "?sslEnabled=true"
-        + "&trustStorePath=" + trustStorePath
-        + "&trustStoreType=JKS"
-        + "&trustStorePassword=" + password
-        + "&clientAuth=require"
-        + "&keyStorePath=" + keyStorePath
-        + "&keyStoreType=PKCS12"
-        + "&keyStorePassword=" + password;
-
-try (Connection conn = DriverManager.getConnection(url)) {
-    // Other actions.
-}
-----
-
-
-== Client Configuration
-
-=== Java
-
-To enable SSL in your Java clients, use the `IgniteClient` class and pass the SSL configuration to it:
-
-[source,Java]
-----
-var sslConfiguration = SslConfiguration.builder()
-                        .enabled(true)
-                        .trustStoreType("JKS")
-                        .trustStorePath(trustStorePath)
-                        .trustStorePassword(password)
-                        .clientAuth(REQUIRE)
-                        .keyStorePath(keyStorePath)
-                        .keyStorePassword(password)
-                        .build();
-
-try (IgniteClient client = IgniteClient.builder()
-    .addresses("localhost:10800")
-    .ssl(sslConfiguration)
-    .build()
-) {
-    // Your code goes here.
-}
-----
-
-
-=== .NET
-
-To enable SSL in your .NET clients, set the `IgniteClientConfiguration.SslStreamFactory` property, which is of type `ISslStreamFactory`. You can use the link:https://github.com/apache/ignite/blob/66f43a4bee163aadb3ad731f6eb9a6dfde9faa73/modules/platforms/dotnet/Apache.Ignite.Core/Client/SslStreamFactory.cs[predefined implementation], which is based on the `SslStream` class from the base class library.
-
-Basic usage without client authorization:
-
-[source,csharp]
-----
-var cfg = new IgniteClientConfiguration { SslStreamFactory = new SslStreamFactory() };
-----
-
-== CLI
-
-To enable SSL on the CLI side, use the `cli config set` command:
-
-[source,shell]
-----
-cli config set cli.trust-store.type=<type>
-cli config set cli.trust-store.path=<path>
-cli config set cli.trust-store.password=<password>
-----
-
-Store the CLI security configuration in a separate file with permission settings that protect it from unauthorized read/write operations. This configuration file must match profiles from the common configuration file.
-
-
-== Network Configuration
-
-The node network is based on the Netty framework. The SSL configuration is the same as described for the client connector above, except that it is specified under the `network` section of the Apache Ignite 3.x configuration:
-
-[source,json]
-----
-"network": {
-    "ssl": {
-        "enabled": true,
-        "clientAuth": "none",
-        "keyStore": {
-            "type": "PKCS12",
-            "path": "must not be empty",
-            "password": "may be empty"
-        },
-        "trustStore": {
-            "type": "PKCS12",
-            "path": "must not be empty",
-            "password": "may be empty"
-        }
-    }
-}
-----
-
-== SSL Client Authentication (mTLS Support)
-
-Optionally, connections can support client authentication. Configure it separately for each connection type on the server side.
-
-Two-way authentication requires that both server and client have certificates they reciprocally trust. The client generates a private key, stores it in its keystore, and gets it signed by an entity the server's truststore trusts.
-
-To support client authentication, a connection must include the `clientAuth`, `trustStore`, and `keyStore` properties. Here is an example of a possible `clientConnector` configuration:
-
-[source,json]
-----
-clientConnector.ssl: {
-  enabled: true,
-  clientAuth: "require",
-  keyStore: {
-    path: "must not be empty",
-    password: "may be empty"
-  },
-  trustStore: {
-    type: "JKS",
-    path: "must not be empty",
-    password: "may be empty"
-  }
-}
-----
\ No newline at end of file
diff --git a/docs/_docs/storage/persistent.adoc b/docs/_docs/storage/persistent.adoc
deleted file mode 100644
index 92fa2c3..0000000
--- a/docs/_docs/storage/persistent.adoc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-= Persistent Storage
-
-
-== Overview
-
-Ignite Persistence is designed to provide quick and responsive persistent storage.
-When the link:config/data-region[data region] is configured to use Ignite persistence, Ignite stores all the data on disk and loads as much data as it can into RAM for processing.
-
-The Native Persistence functionality is based on the following features:
-
-* Storing data partitions on disk
-* Checkpointing
-
-When persistence is enabled, Apache Ignite stores each partition in a separate file on disk. In addition to data partitions, Apache Ignite stores indexes and metadata.
-
-//image::images/persistent_store_structure.png[]
-
-== Checkpointing
-
-_Checkpointing_ is the process of copying dirty pages from RAM to partition files on disk. A dirty page is a page that was updated in RAM but was not written to the respective partition file.
-
-After a checkpoint is created, all changes are persisted to disk and will be available if the node crashes and is restarted.
-
-Checkpointing is designed to ensure durability of data and recovery in case of a node failure.
-
-// image:images/checkpointing-persistence.png[]
-
-This process helps to utilize disk space frugally by keeping pages in the most up-to-date state on disk.
-
-
-== Configuration Properties
-
-The following table describes some properties of javadoc:org.apache.ignite.configuration.DataStorageConfiguration[].
-
-[cols="1,1,3",opts="header", stripes=none]
-|===
-|Property|Default|Description
-|name|| The name of the data region.
-|initSize|`256 * 1024 * 1024`| Sets the initial space allocated to the data region.
-|maxSize|`256 * 1024 * 1024`| Sets the maximum space that can be allocated to the data region.
-|replacementMode|`CLOCK`| Sets the page replacement algorithm.
-|===
-
-
-== Configuration Example
-
-The example below shows how to configure one data region that uses Ignite persistence:
-
-[source,shell]
-----
-ignite config set --type cluster \
-"{
-    aipersist.regions: [{
-        name: btree_persistent_region,
-        maxSize: 256000000
-    }]
-}"
-----
\ No newline at end of file
diff --git a/docs/_docs/table-views.adoc b/docs/_docs/table-views.adoc
deleted file mode 100644
index 1967fc7..0000000
--- a/docs/_docs/table-views.adoc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-= Table Views
-
-Apache Ignite 3.0 Alpha introduces the key-value API to access cached data via built-in table views.
-
-[NOTE]
-====
-Please note that the key-value API uses case-sensitive collation for database object names and parses the name string argument that is passed through the API.
-
-See the examples below:
-
-* String values:
-+
-[source,text]
-----
-// Creates PUBLIC.MYTABLE.
-CREATE TABLE MyTable (id INT, val INT);
-
-// Returns PUBLIC.MYTABLE.
-ignite.tables().table("public.mytable");
-
-// Creates PUBLIC.MyTable.
-CREATE TABLE "MyTable" (id INT, val INT);
-
-// Returns PUBLIC.MyTable.
-ignite.tables().table("public.\"MyTable\"");
-----
-
-* Tuples or columns:
-+
-[source,text]
-----
-// Creates PUBLIC.MYTABLE (ID, Id, VAL).
-CREATE TABLE MyTable (id INT, "Id" INT, val INT);
-
-Tuple tuple = ...
-
-// Returns ID column's value.
-tuple.value("id")
-
-// Returns ID column's value.
-tuple.value("Id")
-
-// Returns Id column's value.
-tuple.value("\"Id\"")
-----
-
-====
-
-== Key-Value View
-
-This table view maps key objects to value objects. The view cannot contain duplicate key objects; each key object can map to at most one value object.
-
-=== Running an Example
-
-Examples are shipped as a separate Maven project, which is located in the `examples` folder. `KeyValueViewExample` demonstrates the usage of the key-value view.
-
-To start running `KeyValueViewExample`, please refer to its link:https://github.com/apache/ignite-3/blob/main/examples/src/main/java/org/apache/ignite/example/table/KeyValueViewExample.java[JavaDoc,window=_blank] for instructions.
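-
-Below is a minimal sketch of how such a view can be obtained and used; the `Account` class and the table name are illustrative, and `null` is passed where an explicit transaction is not needed:
-
-[source,java]
-----
-Table table = ignite.tables().table("PUBLIC.ACCOUNTS");
-
-// A key-value view that maps keys and values to user types.
-KeyValueView<Long, Account> kvView =
-    table.keyValueView(Mapper.of(Long.class), Mapper.of(Account.class));
-
-// Each key maps to at most one value.
-kvView.put(null, 42L, new Account(16_000));
-Account account = kvView.get(null, 42L);
-----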
-
-== Record View
-
-This table view provides methods to access table records.
-
-=== Running an Example
-
-Examples are shipped as a separate Maven project, which is located in the `examples` folder. `RecordViewExample` demonstrates the usage of the record view.
-
-To start running `RecordViewExample`, please refer to its link:https://github.com/apache/ignite-3/blob/main/examples/src/main/java/org/apache/ignite/example/table/RecordViewExample.java[JavaDoc,window=_blank] for instructions.
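-
-Here is a minimal sketch of working with a record view mapped to a user class; the `Pojo` class with `id` and `name` fields mapped to table columns, and the table name, are illustrative:
-
-[source,java]
-----
-Table table = ignite.tables().table("PUBLIC.POJO");
-
-// A record view mapped to a user type.
-RecordView<Pojo> pojoView = table.recordView(Mapper.of(Pojo.class));
-
-// Upsert a full record and read it back by its key field.
-pojoView.upsert(null, new Pojo(42, "John Doe"));
-Pojo res = pojoView.get(null, new Pojo(42));
-----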
-
-== Record Binary View
-
-NOTE: This is a table view implementation for binary objects.
-
-You can use binary objects to access the cached data. The benefit of using binary objects is that you avoid deserialization, which is important if you access objects from a server node that does not have the object’s class representation.
-
-For more information on record binary view, please see the following link:https://github.com/apache/ignite-3/blob/main/modules/table/src/main/java/org/apache/ignite/internal/table/RecordBinaryViewImpl.java[implementation example,window=_blank].
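-
-Here is a minimal sketch of accessing data through the binary (tuple-based) record view without deserializing rows into a user class; the column names are illustrative, and `table` is assumed to be obtained via `ignite.tables().table(...)`:
-
-[source,java]
-----
-RecordView<Tuple> view = table.recordView();
-
-// The record is represented as a tuple; no user class is required.
-Tuple record = Tuple.create()
-    .set("id", 42)
-    .set("name", "John Doe");
-
-view.upsert(null, record);
-Tuple res = view.get(null, Tuple.create().set("id", 42));
-----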
\ No newline at end of file
diff --git a/docs/_docs/thin-clients/index.adoc b/docs/_docs/thin-clients/index.adoc
deleted file mode 100644
index 9e68d83..0000000
--- a/docs/_docs/thin-clients/index.adoc
+++ /dev/null
@@ -1,678 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-= Ignite Clients
-
-Apache Ignite 3 clients connect to the cluster via a standard socket connection. Unlike Ignite 2.x, there are no separate Thin and Thick clients in Apache Ignite 3. All clients are 'thin'.
-
-Clients do not become a part of the cluster topology, never hold any data, and are not used as a destination for compute calculations.
-
-== Getting Started
-
-=== Java Client
-
-==== Prerequisites
-
-To use the Java thin client, Java 11 or newer is required.
-
-==== Installation
-
-The Java client can be added to your project by using Maven:
-
-[source, xml]
-----
-<dependency>
-    <groupId>org.apache.ignite</groupId>
-    <artifactId>ignite-client</artifactId>
-    <version>3.0.0-beta1</version>
-</dependency>
-----
-
-=== C# Client
-
-==== Prerequisites
-
-To use the C# thin client, .NET 6.0 or newer is required.
-
-==== Installation
-
-The C# client is available via NuGet. To add it, use the `add package` command:
-
-----
-dotnet add package Apache.Ignite --version 3.0.0-beta1
-----
-
-=== C++ Client
-
-==== Prerequisites
-
-To build and run the C\++ client, you need a C++ build environment that can run the `cmake` command:
-
-- C\++ compiler supporting C++ 17;
-- CMake 3.10+;
-- One of the following build systems: make, ninja, MS Visual Studio, or another;
-- Conan C/C++ package manager 1.X (optional).
-
-
-==== Installation
-
-The source code of the C++ client comes with the Apache Ignite 3 distribution. To build it, use the following commands:
-
-
-[tabs]
---
-tab:Windows[]
-[source,bat]
-----
-mkdir cmake-build-release
-cd cmake-build-release
-conan install .. --build=missing -s build_type=Release
-cmake ..
-cmake --build . -j8
-----
-
-tab:Linux[]
-[source,bash,subs="attributes,specialchars"]
-----
-mkdir cmake-build-release
-cd cmake-build-release
-conan install .. --build=missing -s build_type=Release -s compiler.libcxx=libstdc++11
-cmake .. -DCMAKE_BUILD_TYPE=Release
-cmake --build . -j8
-----
-
-tab:MacOS[]
-[source,bash,subs="attributes,specialchars"]
-----
-mkdir cmake-build-release
-cd cmake-build-release
-conan install .. --build=missing -s build_type=Release -s compiler.libcxx=libc++
-cmake .. -DCMAKE_BUILD_TYPE=Release
-cmake --build . -j8
-----
-
---
-
-== Connecting to Cluster
-
-To initialize a client, use the `IgniteClient` class and provide it with the configuration:
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-try (IgniteClient client = IgniteClient.builder()
-  .addresses("127.0.0.1:10800")
-  .build()
-) {
-  // Your code goes here
-}
-----
-
-tab:.NET[]
-[source, csharp]
-----
-var clientCfg = new IgniteClientConfiguration
-{
-  Endpoints = { "127.0.0.1" }
-};
-using var client = await IgniteClient.StartAsync(clientCfg);
-----
-
-tab:C++[]
-[source, cpp]
-----
-using namespace ignite;
-
-ignite_client_configuration cfg{"127.0.0.1"};
-auto client = ignite_client::start(cfg, std::chrono::seconds(5));
-----
-
---
-
-
-== User Object Serialization
-
-Apache Ignite 3 supports mapping user objects to table tuples. This ensures that objects created in any programming language can be used for key-value operations directly.
-
-=== Limitations
-
-There are limitations to user types that can be used for such a mapping. Some limitations are common, and others are platform-specific due to the programming language used.
-
-- Only a flat field structure is supported, meaning no nested user objects. This is because Ignite tables, and therefore tuples, have a flat structure themselves;
-- Fields should be mapped to Ignite types;
-- All fields in the user type should either be mapped to a table column or explicitly excluded;
-- All columns of the table should be mapped to some field in the user type;
-- *Java only*: Users should implement Mapper classes for user types for more flexibility;
-- *.NET only*: Any type (class, struct, record) is supported as long as all fields can be mapped to Ignite types;
-- *C++ only*: The user has to provide marshaling functions explicitly, as there is no reflection to generate them based on the user type structure.
-
-=== Usage Examples
-
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-public static class Account {
-  public long id;
-  public long balance;
-
-  public Account() {}
-
-  public Account(long balance) {
-    this.balance = balance;
-  }
-}
-----
-
-tab:.NET[]
-[source, csharp]
-----
-public class Account
-{
-  public long Id { get; set; }
-  public long Balance { get; set; }
-
-  [NotMapped]
-  public Guid UnmappedId { get; set; }
-}
-----
-
-tab:C++[]
-[source, cpp]
-----
-struct account {
-  account() = default;
-  account(std::int64_t id) : id(id) {}
-  account(std::int64_t id, std::int64_t balance) : id(id), balance(balance) {}
-
-  std::int64_t id{0};
-  std::int64_t balance{0};
-};
-
-namespace ignite {
-
-  template<>
-  ignite_tuple convert_to_tuple(account &&value) {
-    ignite_tuple tuple;
-
-    tuple.set("id", value.id);
-    tuple.set("balance", value.balance);
-
-    return tuple;
-  }
-
-  template<>
-  account convert_from_tuple(ignite_tuple&& value) {
-    account res;
-
-    res.id = value.get<std::int64_t>("id");
-
-    // Sometimes only key columns are returned, i.e. "id",
-    // so we have to check whether there are any other columns.
-    if (value.column_count() > 1)
-      res.balance = value.get<std::int64_t>("balance");
-
-    return res;
-  }
-
-} // namespace ignite
-----
-
---
-
-== SQL API
-
-Apache Ignite 3 is focused on SQL, and the SQL API is the primary way to work with the data. You can read more about supported SQL statements in the link:sql-reference/ddl[SQL Reference] section. Here is how you can send SQL requests:
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-try (Session session = client.sql().createSession()) {
-  ResultSet resultSet = session.execute(null, "SELECT name from POJO where id = 42");
-
-  SqlRow row = resultSet.next();
-  assert row.stringValue(0).equals("John Doe");
-}
-----
-
-tab:.NET[]
-[source, csharp]
-----
-IResultSet<IIgniteTuple> resultSet = await client.Sql.ExecuteAsync(transaction: null, "select name from tbl where id = ?", 42);
-List<IIgniteTuple> rows = await resultSet.ToListAsync();
-IIgniteTuple row = rows.Single();
-Debug.Assert(row["name"] as string == "John Doe");
-----
-
-tab:C++[]
-[source, cpp]
-----
-result_set result = client.get_sql().execute(nullptr, {"select id, name from tbl where id = ?"}, {std::int64_t{42}});
-std::vector<ignite_tuple> page = result.current_page();
-ignite_tuple& row = page.front();
-
-assert(row.get<std::int64_t>("id") == 42);
-assert(row.get<std::string>("name") == "John Doe");
-----
-
---
-
-== Transactions
-
-All table operations in Apache Ignite 3 are transactional. You can provide an explicit transaction as a first argument of any Table and SQL API call. If you do not provide an explicit transaction, an implicit one will be created for every call.
-
-Here is how you  can provide a transaction explicitly:
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-KeyValueView<Long, Account> accounts =
-  table.keyValueView(Mapper.of(Long.class), Mapper.of(Account.class));
-
-accounts.put(null, 42L, new Account(16_000));
-
-var tx = client.transactions().begin();
-
-Account account = accounts.get(tx, 42L);
-account.balance += 500;
-accounts.put(tx, 42L, account);
-
-assert accounts.get(tx, 42L).balance == 16_500;
-
-tx.rollback();
-
-assert accounts.get(null, 42L).balance == 16_000;
-----
-
-tab:.NET[]
-[source, csharp]
-----
-var accounts = table.GetKeyValueView<long, Account>();
-await accounts.PutAsync(transaction: null, 42, new Account(16_000));
-
-await using ITransaction tx = await client.Transactions.BeginAsync();
-
-(Account account, bool hasValue) = await accounts.GetAsync(tx, 42);
-account = account with { Balance = account.Balance + 500 };
-
-await accounts.PutAsync(tx, 42, account);
-
-Debug.Assert((await accounts.GetAsync(tx, 42)).Value.Balance == 16_500);
-
-await tx.RollbackAsync();
-
-Debug.Assert((await accounts.GetAsync(null, 42)).Value.Balance == 16_000);
-
-public record Account(decimal Balance);
-----
-
-tab:C++[]
-[source, cpp]
-----
-auto accounts = table.get_key_value_view<account, account>();
-
-account init_value(42, 16'000);
-accounts.put(nullptr, {42}, init_value);
-
-auto tx = client.get_transactions().begin();
-
-std::optional<account> res_account = accounts.get(&tx, {42});
-res_account->balance += 500;
-accounts.put(&tx, {42}, *res_account);
-
-assert(accounts.get(&tx, {42})->balance == 16'500);
-
-tx.rollback();
-
-assert(accounts.get(nullptr, {42})->balance == 16'000);
-----
-
---
-
-== Table API
-
-To execute table operations on a specific table, you need to get a specific view of the table and use one of its methods. You can only create new tables by using the SQL API.
-
-When working with tables, you can use the built-in Tuple type, which is a set of key-value pairs underneath, or map the data to your own types for strongly-typed access. Here is how you can work with tables:
-
-=== Getting a Table Instance
-
-First, get an instance of the table. To obtain an instance of a table, use the `IgniteTables.table(String)` method. You can also use the `IgniteTables.tables()` method to list all existing tables.
-
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-IgniteTables tableApi = client.tables();
-List<Table> existingTables = tableApi.tables();
-Table firstTable = existingTables.get(0);
-
-Table myTable = tableApi.table("MY_TABLE");
-----
-
-tab:.NET[]
-[source, csharp]
-----
-var existingTables = await client.Tables.GetTablesAsync();
-var firstTable = existingTables[0];
-
-var myTable = await client.Tables.GetTableAsync("MY_TABLE");
-----
-
-tab:C++[]
-[source, cpp]
-----
-using namespace ignite;
-
-auto table_api = client.get_tables();
-std::vector<table> existing_tables = table_api.get_tables();
-table first_table = existing_tables.front();
-
-std::optional<table> my_table = table_api.get_table("MY_TABLE");
-----
---
-
-=== Basic Table Operations
-
-Once you have a table, you need to get a specific view that defines how you want to operate on the table records.
-
-==== Binary Record View
-
-A binary record view. It can be used to operate on table tuples directly.
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-RecordView<Tuple> view = table.recordView();
-
-Tuple fullRecord = Tuple.create()
-  .set("id", 42)
-  .set("name", "John Doe");
-
-view.upsert(null, fullRecord);
-
-Tuple keyRecord = Tuple.create().set("id", 42);
-
-Tuple resRecord = view.get(null, keyRecord);
-
-assert resRecord.columnCount() == 2;
-assert resRecord.intValue("id") == 42;
-assert resRecord.stringValue("name").equals("John Doe");
-----
-
-tab:.NET[]
-[source, csharp]
-----
-IRecordView<IIgniteTuple> view = table.RecordBinaryView;
-
-IIgniteTuple fullRecord = new IgniteTuple
-{
-  ["id"] = 42,
-  ["name"] = "John Doe"
-};
-
-await view.UpsertAsync(transaction: null, fullRecord);
-
-IIgniteTuple keyRecord = new IgniteTuple { ["id"] = 42 };
-(IIgniteTuple value, bool hasValue) = await view.GetAsync(transaction: null, keyRecord);
-
-Debug.Assert(hasValue);
-Debug.Assert(value.FieldCount == 2);
-Debug.Assert(value["id"] as int? == 42);
-Debug.Assert(value["name"] as string == "John Doe");
-----
-
-tab:C++[]
-[source, cpp]
-----
-record_view<ignite_tuple> view = table.get_record_binary_view();
-
-ignite_tuple record{
-  {"id", 42},
-  {"name", "John Doe"}
-};
-
-view.upsert(nullptr, record);
-std::optional<ignite_tuple> res_record = view.get(nullptr, {"id", 42});
-
-assert(res_record.has_value());
-assert(res_record->column_count() == 2);
-assert(res_record->get<std::int64_t>("id") == 42);
-assert(res_record->get<std::string>("name") == "John Doe");
-----
-
---
-
-==== Record View
-
-A record view mapped to a user type. It can be used to operate on the table using user objects that are mapped to table tuples.
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-RecordView<Pojo> pojoView = table.recordView(Mapper.of(Pojo.class));
-
-pojoView.upsert(null, new Pojo(42, "John Doe"));
-Pojo resRecord = pojoView.get(null, new Pojo(42));
-
-assert resRecord.id == 42;
-assert resRecord.name.equals("John Doe");
-----
-
-tab:.NET[]
-[source, csharp]
-----
-var pocoView = table.GetRecordView<Poco>();
-
-await pocoView.UpsertAsync(transaction: null, new Poco(42, "John Doe"));
-var (value, hasValue) = await pocoView.GetAsync(transaction: null, new Poco(42));
-
-Debug.Assert(hasValue);
-Debug.Assert(value.Name == "John Doe");
-
-public record Poco(long Id, string? Name = null);
-----
-
-tab:C++[]
-[source, cpp]
-----
-record_view<person> view = table.get_record_view<person>();
-
-person record(42, "John Doe");
-
-view.upsert(nullptr, record);
-std::optional<person> res_record = view.get(nullptr, person{42});
-
-assert(res_record.has_value());
-assert(res_record->id == 42);
-assert(res_record->name == "John Doe");
-----
-
---
-
-==== Key-Value Binary View
-
-A binary key-value view. It can be used to operate on the table using key and value tuples separately.
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-KeyValueView<Tuple, Tuple> kvView = table.keyValueView();
-
-Tuple key = Tuple.create().set("id", 42);
-Tuple val = Tuple.create().set("name", "John Doe");
-
-kvView.put(null, key, val);
-Tuple res = kvView.get(null, key);
-
-assert res.columnCount() == 1;
-assert res.stringValue("name").equals("John Doe");
-----
-
-tab:.NET[]
-[source, csharp]
-----
-IKeyValueView<IIgniteTuple, IIgniteTuple> kvView = table.KeyValueBinaryView;
-
-IIgniteTuple key = new IgniteTuple { ["id"] = 42 };
-IIgniteTuple val = new IgniteTuple { ["name"] = "John Doe" };
-
-await kvView.PutAsync(transaction: null, key, val);
-(IIgniteTuple? value, bool hasValue) = await kvView.GetAsync(transaction: null, key);
-
-Debug.Assert(hasValue);
-Debug.Assert(value.FieldCount == 1);
-Debug.Assert(value["name"] as string == "John Doe");
-----
-
-tab:C++[]
-[source, cpp]
-----
-key_value_view<ignite_tuple, ignite_tuple> kv_view = table.get_key_value_binary_view();
-
-ignite_tuple key_tuple{{"id", 42}};
-ignite_tuple val_tuple{{"name", "John Doe"}};
-
-kv_view.put(nullptr, key_tuple, val_tuple);
-std::optional<ignite_tuple> res_tuple = kv_view.get(nullptr, key_tuple);
-
-assert(res_tuple.has_value());
-assert(res_tuple->column_count() == 1);
-assert(res_tuple->get<std::string>("name") == "John Doe");
-----
-
---
-
-
-==== Key-Value View
-
-A key-value view with user objects. It can be used to operate on the table using key and value user objects mapped to table tuples.
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-KeyValueView<Long, Pojo> pojoView =
-  table.keyValueView(Mapper.of(Long.class), Mapper.of(Pojo.class));
-
-pojoView.put(null, 42L, new Pojo("John Doe"));
-Pojo val = pojoView.get(null, 42L);
-
-assert val.name.equals("John Doe");
-----
-
-tab:.NET[]
-[source, csharp]
-----
-IKeyValueView<long, Poco> kvView = table.GetKeyValueView<long, Poco>();
-
-await kvView.PutAsync(transaction: null, 42, new Poco(Id: 0, Name: "John Doe"));
-(Poco? value, bool hasValue) = await kvView.GetAsync(transaction: null, 42);
-
-Debug.Assert(hasValue);
-Debug.Assert(value.Name == "John Doe");
-
-public record Poco(long Id, string? Name = null);
-----
-
-tab:C++[]
-[source, cpp]
-----
-key_value_view<person, person> kv_view = table.get_key_value_view<person, person>();
-
-kv_view.put(nullptr, {42}, {"John Doe"});
-std::optional<person> res = kv_view.get(nullptr, {42});
-
-assert(res.has_value());
-assert(res->id == 42);
-assert(res->name == "John Doe");
-----
-
---
-
-
-
-== Executing Compute Tasks
-
-Apache Ignite 3 clients support basic compute capabilities. You can execute compute tasks that are already deployed in the cluster.
-
-You can run a task across all cluster nodes or a specific cluster group. The deployment assumes that you create a JAR file with the compute tasks and add the JAR to the cluster nodes' classpath.
-
-The example below shows how to get access to the compute APIs and execute the compute task named `MyTask`:
-
-[tabs]
---
-tab:Java[]
-[source, java]
-----
-String result = client.compute().<String>execute(
-  client.clusterNodes(), "MyTask", "Lorem", "ipsum", 42);
-----
-
-tab:.NET[]
-[source, csharp]
-----
-IList<IClusterNode> nodes = await client.GetClusterNodesAsync();
-string res = await client.Compute.ExecuteAsync<string>(nodes, "org.foo.bar.MyTask", 42);
-----
-
-tab:C++[]
-[source, cpp]
-----
-std::vector<cluster_node> nodes = client.get_cluster_nodes();
-std::optional<primitive> res = client.get_compute().execute(nodes, "org.foo.bar.MyTask", {42});
-std::cout << res->get<std::string>() << std::endl;
-----
-
---
-
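-Building on the Java example above, you can also target a subset of the topology instead of every node. The sketch below runs the same deployed task on a single node picked from the cluster; the selection of the node is illustrative:
-
-[source, java]
-----
-// Pick an arbitrary node from the current cluster topology.
-Collection<ClusterNode> nodes = client.clusterNodes();
-ClusterNode targetNode = nodes.iterator().next();
-
-// Execute the deployed task only on the selected node.
-String result = client.compute().<String>execute(
-  Set.of(targetNode), "MyTask", "Lorem", "ipsum", 42);
-----
-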
-
-== Partition Awareness
-
-In Apache Ignite 3, partition awareness is enabled automatically for all clients.
-
-Data in the cluster is distributed between the nodes in a balanced manner for scalability and performance reasons. Each cluster node maintains a subset of the data and the partition distribution map, which is used to determine the node that keeps the primary or backup copy of the requested entries.
-
-Partition awareness allows the client to send query requests directly to the node that owns the queried data.
-
-Without partition awareness, an application that is connected to the cluster via a client would execute all queries and operations via a single server node that acts as a proxy for the incoming requests.
-These operations would then be re-routed to the node that stores the data that is being requested.
-This would result in a bottleneck that could prevent the application from scaling linearly.
-
-image::images/partitionawareness01.png[Without Partition Awareness]
-
-Notice how queries must pass through the proxy server node, where they are routed to the correct node.
-
-With partition awareness in place, the client can directly route queries and operations to the primary nodes that own the data required for the queries.
-This eliminates the bottleneck, allowing the application to scale more easily.
-
-image::images/partitionawareness02.png[With Partition Awareness]
diff --git a/docs/_docs/transactions/performing-transactions.adoc b/docs/_docs/transactions/performing-transactions.adoc
deleted file mode 100644
index f2f5288..0000000
--- a/docs/_docs/transactions/performing-transactions.adoc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-= Performing Transactions
-
-This Beta release introduces the key-value API that provides an interface for starting and completing transactions.
-
-== Synchronous Transactions
-
-Use the `igniteTransactions` API to begin a transaction, and the `commit` method to commit it.
-
-[tabs]
---
-tab:Java[]
-[source,java]
-----
-Transaction tx = igniteTransactions.begin();
-tx.commit();
-----
---
-
-You can also perform a rollback with the `rollback` method:
-
-[tabs]
---
-tab:Java[]
-[source,java]
-----
-Transaction tx = igniteTransactions.begin();
-tx.rollback();
-----
---
-
-
-Here is an example of a transaction that transfers money from one account to another and handles a possible overdraft:
-
---
-[source,java]
-----
-Transaction tx = igniteTransactions.begin();
-
-try {
-    Tuple row1 = accounts.get(tx, Tuple.create().set("accountId", 1));
-    if (row1.doubleValue("balance") - amount < 0) {
-        tx.rollback();
-        return false;
-    }
-    Tuple row2 = accounts.get(tx, Tuple.create().set("accountId", 2));
-    accounts.upsert(tx, Tuple.create().set("accountId", 1).set("balance", row1.doubleValue("balance") - amount));
-    accounts.upsert(tx, Tuple.create().set("accountId", 2).set("balance", row2.doubleValue("balance") + amount));
-    tx.commit();
-} catch (Throwable t) {
-    tx.rollback();
-}
-----
---
-
-== Asynchronous Transactions
-
-You can also perform transactions asynchronously.
-
-[tabs]
---
-tab:Java[]
-[source,java]
-----
-protected Table accounts;
-protected Table customers;
-
-accounts.recordView().upsert(null, makeValue(1, BALANCE_1));
-accounts.recordView().upsert(null, makeValue(2, BALANCE_2));
-igniteTransactions.beginAsync()
-    .thenCompose(tx -> accounts.recordView().getAsync(tx, makeKey(1))
-        .thenCombine(accounts.recordView().getAsync(tx, makeKey(2)), (v1, v2) -> new Pair<>(v1, v2))
-        .thenCompose(pair -> allOf(
-            accounts.recordView().upsertAsync(
-                tx, makeValue(1, pair.getFirst().doubleValue("balance") - DELTA)),
-            accounts.recordView().upsertAsync(
-                tx, makeValue(2, pair.getSecond().doubleValue("balance") + DELTA))
-        )
-        .thenApply(ignored -> tx)
-        )
-    ).thenCompose(Transaction::commitAsync).join();
-----
---
-
-
-== Implicit Transaction Management
-
-Apache Ignite 3 also provides implicit transaction management for simpler transactions via the `runInTransaction` method. When using it, the following is done automatically:
-
-- The transaction is started and passed to the closure.
-- The transaction is committed if no exceptions were thrown during the closure.
-- The transaction is retried in case of a recoverable error. The closure must be purely functional, that is, it must not cause side effects.
-
-Here is an example of a transaction that transfers money from one account to another and handles a possible overdraft:
-
-[tabs]
---
-tab:Java[]
-[source,java]
-----
-igniteTransactions.runInTransaction(tx -> {
-    CompletableFuture<Tuple> fut1 = view.getAsync(tx, Tuple.create().set("accountId", 1));
-    CompletableFuture<Tuple> fut2 = view.getAsync(tx, Tuple.create().set("accountId", 2)); // Read second balance concurrently
-    if (fut1.join().doubleValue("balance") - amount < 0) {
-        tx.rollback();
-        return;
-    }
-
-    view.upsertAsync(tx, Tuple.create().set("accountId", 1).set("balance", fut1.join().doubleValue("balance") - amount));
-    view.upsertAsync(tx, Tuple.create().set("accountId", 2).set("balance", fut2.join().doubleValue("balance") + amount));
-});
-----
---
\ No newline at end of file