feat(store): integrate `store-client` submodule
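
Wire the new `hg-store-client` Maven module into `hugegraph-store`: the module
POM, the public client API (HgStoreClient, HgSessionManager, HgStoreSession,
HgKvStore, the scan iterators and key/query types), and the node-management
layer under `org.apache.hugegraph.store.client`.

A minimal usage sketch of the API introduced here (assumptions: `PDConfig.of(...)`
is the factory exposed by hg-pd-client, and the PD address, graph name and table
name are placeholders for a reachable PD/store cluster):

    import org.apache.hugegraph.pd.client.PDConfig;
    import org.apache.hugegraph.store.HgKvEntry;
    import org.apache.hugegraph.store.HgKvIterator;
    import org.apache.hugegraph.store.HgOwnerKey;
    import org.apache.hugegraph.store.HgStoreClient;
    import org.apache.hugegraph.store.HgStoreSession;

    public class StoreClientExample {
        public static void main(String[] args) {
            // Assumption: PDConfig.of(...) comes from hg-pd-client; the address is illustrative.
            HgStoreClient client = HgStoreClient.create(PDConfig.of("127.0.0.1:8686"));
            HgStoreSession session = client.openSession("hugegraph");

            byte[] owner = "owner-1".getBytes();

            // Transactional write: HgStoreSession extends HgKvStore, so put/get/scan are inherited.
            session.beginTx();
            session.put("example-table", HgOwnerKey.of(owner, "key-1".getBytes()),
                        "value-1".getBytes());
            session.commit();

            // Full-table scan; HgKvIterator is Closeable, so try-with-resources applies.
            try (HgKvIterator<HgKvEntry> it = session.scanIterator("example-table")) {
                while (it.hasNext()) {
                    HgKvEntry entry = it.next();
                    // entry.key() / entry.value() are raw byte[] payloads
                }
            }
        }
    }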
diff --git a/hugegraph-store/hg-store-client/pom.xml b/hugegraph-store/hg-store-client/pom.xml
new file mode 100644
index 0000000..2c402d3
--- /dev/null
+++ b/hugegraph-store/hg-store-client/pom.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-store</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>hg-store-client</artifactId>
+
+    <properties>
+        <maven.compiler.source>11</maven.compiler.source>
+        <maven.compiler.target>11</maven.compiler.target>
+        <maven.test.skip>true</maven.test.skip>
+        <log4j2.version>2.15.0</log4j2.version>
+        <lombok.version>1.18.20</lombok.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-store-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-store-common</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-client</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>${lombok.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+            <version>${log4j2.version}</version>
+        </dependency>
+        <!-- test -->
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.2</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java-util</artifactId>
+            <version>3.17.2</version>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.7</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+            <version>2.13.0</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>commons-codec</groupId>
+            <artifactId>commons-codec</artifactId>
+            <version>1.15</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+</project>
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvEntry.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvEntry.java
new file mode 100644
index 0000000..ff44db0
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvEntry.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+public interface HgKvEntry {
+
+    byte[] key();
+
+    byte[] value();
+
+    default int code() {
+        return -1;
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvIterator.java
new file mode 100644
index 0000000..38c8b00
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvIterator.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import java.io.Closeable;
+import java.util.Iterator;
+
+/**
+ * created on 2021/10/21
+ */
+public interface HgKvIterator<E> extends Iterator<E>, HgSeekAble, Closeable {
+
+    byte[] key();
+
+    byte[] value();
+
+    @Override
+    void close();
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvOrderedIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvOrderedIterator.java
new file mode 100644
index 0000000..52df012
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvOrderedIterator.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+/**
+ * created on 2022/03/10
+ */
+public interface HgKvOrderedIterator<E> extends HgKvIterator<E>, Comparable<HgKvOrderedIterator> {
+
+    long getSequence();
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvPagingIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvPagingIterator.java
new file mode 100644
index 0000000..ba9fa33
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvPagingIterator.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+/**
+ * created on 2021/10/24
+ */
+public interface HgKvPagingIterator<E> extends HgKvIterator<E>, HgPageSize {
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvStore.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvStore.java
new file mode 100644
index 0000000..db64059
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvStore.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import java.util.List;
+
+import org.apache.hugegraph.store.client.grpc.KvCloseableIterator;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+
+/**
+ * @version 0.2.0
+ */
+public interface HgKvStore {
+
+    /**
+     * CAUTION: THE CONSTANTS BELOW MUST BE KEPT CONSISTENT WITH ScanIterator.Trait.
+     */
+    int SCAN_ANY = 0x80;
+    int SCAN_PREFIX_BEGIN = 0x01;
+    int SCAN_PREFIX_END = 0x02;
+    int SCAN_GT_BEGIN = 0x04;
+    int SCAN_GTE_BEGIN = 0x0c;
+    int SCAN_LT_END = 0x10;
+    int SCAN_LTE_END = 0x30;
+    int SCAN_KEYONLY = 0x40;
+    int SCAN_HASHCODE = 0x100;
+
+    boolean put(String table, HgOwnerKey ownerKey, byte[] value);
+
+    /**
+     * This variant is used internally by the store. It writes data to a partition;
+     * the partitionId and key.keyCode must be consistent with the partition info stored in PD.
+     */
+    boolean directPut(String table, int partitionId, HgOwnerKey key, byte[] value);
+
+    byte[] get(String table, HgOwnerKey ownerKey);
+
+    boolean clean(int partId);
+
+    boolean delete(String table, HgOwnerKey ownerKey);
+
+    boolean deleteSingle(String table, HgOwnerKey ownerKey);
+
+    boolean deletePrefix(String table, HgOwnerKey prefix);
+
+    boolean deleteRange(String table, HgOwnerKey start, HgOwnerKey end);
+
+    boolean merge(String table, HgOwnerKey key, byte[] value);
+
+    @Deprecated
+    List<HgKvEntry> batchGetOwner(String table, List<HgOwnerKey> keyList);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, byte[] query);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, long limit);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, long limit, byte[] query);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix, long limit);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix, long limit,
+                                         byte[] query);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey,
+                                         long limit);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey,
+                                         long limit, byte[] query);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey,
+                                         long limit, int scanType, byte[] query);
+
+    HgKvIterator<HgKvEntry> scanIterator(String table, int codeFrom, int codeTo, int scanType,
+                                         byte[] query);
+
+    // HgKvIterator<HgKvEntry> scanIterator(ScanStreamReq scanReq);
+
+    HgKvIterator<HgKvEntry> scanIterator(ScanStreamReq.Builder scanReqBuilder);
+
+    long count(String table);
+
+    boolean truncate();
+
+    default boolean existsTable(String table) {
+        return false;
+    }
+
+    boolean createTable(String table);
+
+    boolean deleteTable(String table);
+
+    boolean dropTable(String table);
+
+    boolean deleteGraph(String graph);
+
+    List<HgKvIterator<HgKvEntry>> scanBatch(HgScanQuery scanQuery);
+
+    KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch2(HgScanQuery scanQuery);
+
+    KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch3(HgScanQuery scanQuery,
+                                                            KvCloseableIterator iterator);
+
+    HgKvIterator<HgKvEntry> batchPrefix(String table, List<HgOwnerKey> prefixList);
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java
new file mode 100644
index 0000000..e9245b3
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_BYTES;
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_OWNER_KEY;
+
+import java.io.Serializable;
+import java.util.Arrays;
+
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+
+/**
+ * created on 2021/10/14
+ *
+ * @version 1.3.0 add canceled assert
+ */
+public class HgOwnerKey implements Serializable {
+
+    private final byte[] owner; // TODO: consider removing it? It seems to be unused
+    private int keyCode = 0; // TODO: is this the right place for it?
+    private byte[] key;
+    // Sequence number, used for batch queries to ensure the order of returned results
+    private int serialNo;
+
+    /**
+     * @param owner
+     * @param key
+     * @see HgOwnerKey#of(byte[], byte[])
+     */
+    @Deprecated
+    public HgOwnerKey(byte[] owner, byte[] key) {
+        if (owner == null) {
+            owner = EMPTY_BYTES;
+        }
+        if (key == null) {
+            key = EMPTY_BYTES;
+        }
+        this.owner = owner;
+        this.key = key;
+    }
+
+    public HgOwnerKey(int code, byte[] key) {
+        if (key == null) {
+            key = EMPTY_BYTES;
+        }
+        this.owner = EMPTY_BYTES;
+        this.key = key;
+        this.keyCode = code;
+    }
+
+    public static HgOwnerKey emptyOf() {
+        return EMPTY_OWNER_KEY;
+    }
+
+    public static HgOwnerKey newEmpty() {
+        return HgOwnerKey.of(EMPTY_BYTES, EMPTY_BYTES);
+    }
+
+    public static HgOwnerKey ownerOf(byte[] owner) {
+        return new HgOwnerKey(owner, EMPTY_BYTES);
+    }
+
+    public static HgOwnerKey codeOf(int code) {
+        return HgOwnerKey.of(EMPTY_BYTES, EMPTY_BYTES).setKeyCode(code);
+    }
+
+    public static HgOwnerKey of(byte[] owner, byte[] key) {
+        return new HgOwnerKey(owner, key);
+    }
+
+    public static HgOwnerKey of(int keyCode, byte[] key) {
+        return new HgOwnerKey(keyCode, key);
+    }
+
+    public byte[] getOwner() {
+        return owner;
+    }
+
+    public byte[] getKey() {
+        return key;
+    }
+
+    public int getKeyCode() {
+        return keyCode;
+    }
+
+    public HgOwnerKey setKeyCode(int keyCode) {
+        this.keyCode = keyCode;
+        return this;
+    }
+
+    public HgOwnerKey codeToKey(int keyCode) {
+        this.keyCode = keyCode;
+        this.key = HgStoreClientUtil.toIntBytes(keyCode);
+        return this;
+    }
+
+    public int getSerialNo() {
+        return this.serialNo;
+    }
+
+    public HgOwnerKey setSerialNo(int serialNo) {
+        this.serialNo = serialNo;
+        return this;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        HgOwnerKey that = (HgOwnerKey) o;
+        return Arrays.equals(owner, that.owner) && Arrays.equals(key, that.key);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = Arrays.hashCode(owner);
+        result = 31 * result + Arrays.hashCode(key);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "HgOwnerKey{" +
+               "owner=" + Arrays.toString(owner) +
+               ", key=" + Arrays.toString(key) +
+               ", code=" + keyCode +
+               '}';
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java
new file mode 100644
index 0000000..38163d5
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+/**
+ * Return the number of records returned by one page of a pageable query.
+ * <p>
+ * created on 2021/10/24
+ */
+public interface HgPageSize {
+
+    long getPageSize();
+
+    default boolean isPageEmpty() {
+        return false;
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java
new file mode 100644
index 0000000..80cdb77
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+public final class HgPrivate {
+
+    private static final HgPrivate INSTANCE = new HgPrivate();
+
+    private HgPrivate() {
+    }
+
+    static HgPrivate of() {
+        return INSTANCE;
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java
new file mode 100644
index 0000000..cc64ba9
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java
@@ -0,0 +1,331 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.grpc.common.ScanOrderType;
+
+/**
+ * 2022/3/4
+ *
+ * @version 0.5.0
+ */
+public interface HgScanQuery {
+
+    static HgScanQuery tableOf(String table) {
+        return ScanBuilder.tableOf(table).build();
+    }
+
+    static HgScanQuery rangeOf(String table, List<HgOwnerKey> startList, List<HgOwnerKey> endList) {
+        return ScanBuilder.rangeOf(table, startList, endList).build();
+    }
+
+    static HgScanQuery prefixOf(String table, List<HgOwnerKey> prefixList) {
+        return ScanBuilder.prefixOf(table, prefixList).build();
+    }
+
+    static HgScanQuery prefixOf(String table, List<HgOwnerKey> prefixList,
+                                ScanOrderType orderType) {
+        return ScanBuilder.prefixOf(table, prefixList).setOrderType(orderType).build();
+    }
+
+    static HgScanQuery prefixIteratorOf(String table, Iterator<HgOwnerKey> prefixItr) {
+        return ScanBuilder.prefixIteratorOf(table, prefixItr).build();
+    }
+
+    static HgScanQuery prefixIteratorOf(String table, Iterator<HgOwnerKey> prefixItr,
+                                        ScanOrderType orderType) {
+        return ScanBuilder.prefixIteratorOf(table, prefixItr).setOrderType(orderType).build();
+    }
+
+    String getTable();
+
+    HgScanQuery.ScanMethod getScanMethod();
+
+    List<HgOwnerKey> getPrefixList();
+
+    Iterator<HgOwnerKey> getPrefixItr();
+
+    List<HgOwnerKey> getStartList();
+
+    List<HgOwnerKey> getEndList();
+
+    long getLimit();
+
+    long getPerKeyLimit();
+
+    long getPerKeyMax();
+
+    long getSkipDegree();
+
+    int getScanType();
+
+    ScanOrderType getOrderType();
+
+    boolean isOnlyKey();
+
+    byte[] getQuery();
+
+    ScanBuilder builder();
+
+    enum ScanMethod {
+        ALL,
+        PREFIX,
+        RANGE
+    }
+
+    enum SortType {
+        UNSORTED,
+        SORT_BY_EDGE,
+        SORT_BY_VERTEX
+    }
+
+    class ScanBuilder {
+
+        private final String table;
+        private final HgScanQuery.ScanMethod scanMethod;
+        private long limit = Integer.MAX_VALUE;
+        private long perKeyLimit = Integer.MAX_VALUE;
+        private long perKeyMax = Integer.MAX_VALUE;
+        private int scanType;
+        private ScanOrderType orderType;
+
+        private long skipDegree;
+
+        private boolean onlyKey;
+        private byte[] query;
+        private List<HgOwnerKey> prefixList;
+        private List<HgOwnerKey> startList;
+        private List<HgOwnerKey> endList;
+        private Iterator<HgOwnerKey> prefixItr;
+
+        ScanBuilder(HgScanQuery.ScanMethod scanMethod, String table) {
+            this.table = table;
+            this.scanMethod = scanMethod;
+            this.orderType = ScanOrderType.ORDER_NONE;
+        }
+
+        public static ScanBuilder rangeOf(String table, List<HgOwnerKey> startList,
+                                          List<HgOwnerKey> endList) {
+            HgAssert.isArgumentValid(table, "table");
+            HgAssert.isArgumentValid(startList, "startList");
+            HgAssert.isArgumentValid(endList, "endList");
+            HgAssert.isTrue(startList.size() == endList.size(),
+                            "The size of startList does not equal that of endList.");
+
+            ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.RANGE, table);
+            res.startList = startList;
+            res.endList = endList;
+            res.scanType = HgKvStore.SCAN_GTE_BEGIN | HgKvStore.SCAN_LTE_END;
+            return res;
+        }
+
+        public static ScanBuilder prefixOf(String table, List<HgOwnerKey> prefixList) {
+            HgAssert.isArgumentValid(table, "table");
+            HgAssert.isArgumentValid(prefixList, "prefixList");
+
+            ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.PREFIX, table);
+            res.prefixList = prefixList;
+            return res;
+
+        }
+
+        public static ScanBuilder prefixIteratorOf(String table, Iterator<HgOwnerKey> prefixItr) {
+            HgAssert.isArgumentValid(table, "table");
+
+            ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.PREFIX, table);
+            res.prefixItr = prefixItr;
+            return res;
+
+        }
+
+        public static ScanBuilder tableOf(String table) {
+            HgAssert.isArgumentValid(table, "table");
+
+            return new ScanBuilder(HgScanQuery.ScanMethod.ALL, table);
+        }
+
+        public ScanBuilder setLimit(long limit) {
+            this.limit = limit;
+            return this;
+        }
+
+        public ScanBuilder setPerKeyLimit(long limit) {
+            this.perKeyLimit = limit;
+            return this;
+        }
+
+        public ScanBuilder setPerKeyMax(long max) {
+            this.perKeyMax = max;
+            return this;
+        }
+
+        public ScanBuilder setScanType(int scanType) {
+            this.scanType = scanType;
+            return this;
+        }
+
+        public ScanBuilder setOrderType(ScanOrderType orderType) {
+            this.orderType = orderType;
+            return this;
+        }
+
+        public ScanBuilder setQuery(byte[] query) {
+            this.query = query;
+            return this;
+        }
+
+        public ScanBuilder setSkipDegree(long skipDegree) {
+            this.skipDegree = skipDegree;
+            return this;
+        }
+
+        public ScanBuilder setOnlyKey(boolean onlyKey) {
+            this.onlyKey = onlyKey;
+            return this;
+        }
+
+        public HgScanQuery build() {
+            return this.new BatchScanQuery();
+        }
+
+        private class BatchScanQuery implements HgScanQuery {
+
+            @Override
+            public String getTable() {
+                return table;
+            }
+
+            @Override
+            public HgScanQuery.ScanMethod getScanMethod() {
+                return scanMethod;
+            }
+
+            @Override
+            public List<HgOwnerKey> getPrefixList() {
+                if (prefixList == null) {
+                    return Collections.emptyList();
+                } else {
+                    return Collections.unmodifiableList(prefixList);
+                }
+            }
+
+            @Override
+            public Iterator<HgOwnerKey> getPrefixItr() {
+                return prefixItr;
+            }
+
+            @Override
+            public List<HgOwnerKey> getStartList() {
+                if (startList == null) {
+                    return Collections.emptyList();
+                } else {
+                    return Collections.unmodifiableList(startList);
+                }
+            }
+
+            @Override
+            public List<HgOwnerKey> getEndList() {
+                if (endList == null) {
+                    return Collections.emptyList();
+                } else {
+                    return Collections.unmodifiableList(endList);
+                }
+            }
+
+            @Override
+            public long getLimit() {
+                return limit;
+            }
+
+            @Override
+            public long getPerKeyLimit() {
+                return perKeyLimit;
+            }
+
+            @Override
+            public long getPerKeyMax() {
+                return perKeyMax;
+            }
+
+            @Override
+            public long getSkipDegree() {
+                return skipDegree;
+            }
+
+            @Override
+            public int getScanType() {
+                return scanType;
+            }
+
+            @Override
+            public ScanOrderType getOrderType() {
+                return orderType;
+            }
+
+            @Override
+            public boolean isOnlyKey() {
+                return onlyKey;
+            }
+
+            @Override
+            public byte[] getQuery() {
+                return query;
+            }
+
+            @Override
+            public ScanBuilder builder() {
+                return ScanBuilder.this;
+            }
+
+            @Override
+            public String toString() {
+                final StringBuffer sb = new StringBuffer("HgScanQuery{");
+                sb.append("table='").append(getTable()).append('\'');
+                sb.append(", scanMethod=").append(getScanMethod());
+                sb.append(", prefixList=").append(getPrefixList());
+                sb.append(", startList=").append(getStartList());
+                sb.append(", endList=").append(getEndList());
+                sb.append(", limit=").append(getLimit());
+                sb.append(", perKeyLimit=").append(getPerKeyLimit());
+                sb.append(", perKeyMax=").append(getPerKeyMax());
+                sb.append(", skipDegree=").append(getSkipDegree());
+                sb.append(", scanType=").append(getScanType());
+                sb.append(", orderType=").append(getOrderType());
+                sb.append(", onlyKey=").append(isOnlyKey());
+                sb.append(", query=");
+                if (query == null) {
+                    sb.append("null");
+                } else {
+                    sb.append('[');
+                    for (int i = 0; i < query.length; ++i) {
+                        sb.append(i == 0 ? "" : ", ").append(query[i]);
+                    }
+                    sb.append(']');
+                }
+                sb.append('}');
+                return sb.toString();
+            }
+        }
+
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java
new file mode 100644
index 0000000..fe6a580
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+/**
+ * created on 2022/03/11
+ */
+public interface HgSeekAble {
+
+    byte[] position();
+
+    void seek(byte[] position);
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java
new file mode 100644
index 0000000..37c2184
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.client.HgStoreSessionProvider;
+
+/**
+ * Maintains HgStoreSession instances for
+ * HgStore clusters.
+ */
+
+@ThreadSafe
+public final class HgSessionManager {
+
+    // TODO: support holding more than one HgSessionManager, to connect to multiple clusters
+    private final static HgSessionManager INSTANCE = new HgSessionManager();
+    private final HgSessionProvider sessionProvider;
+
+    private HgSessionManager() {
+        // TODO: constructed by SPI
+        this.sessionProvider = new HgStoreSessionProvider();
+    }
+
+    public static HgSessionManager getInstance() {
+        return INSTANCE;
+    }
+
+    /**
+     * Retrieve or create a HgStoreSession.
+     *
+     * @param graphName
+     * @return
+     */
+    public HgStoreSession openSession(String graphName) {
+        return this.sessionProvider.createSession(graphName);
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java
new file mode 100644
index 0000000..7049c27
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/**
+ * created on 2021/10/12
+ */
+@ThreadSafe
+public interface HgSessionProvider {
+
+    HgStoreSession createSession(String graphName);
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java
new file mode 100644
index 0000000..0f8ebb9
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+import org.apache.hugegraph.store.client.HgStoreNodePartitionerImpl;
+import org.apache.hugegraph.store.client.HgStoreSessionProvider;
+
+/**
+ * Maintains HgStoreSession instances for
+ * HgStore clusters.
+ */
+
+@ThreadSafe
+public final class HgStoreClient {
+
+    // TODO: support holding more than one HgSessionManager, to connect to multiple clusters
+    private final HgSessionProvider sessionProvider;
+    private PDClient pdClient;
+
+    public HgStoreClient() {
+        this.sessionProvider = new HgStoreSessionProvider();
+    }
+
+    public HgStoreClient(PDConfig config) {
+        this.sessionProvider = new HgStoreSessionProvider();
+        pdClient = PDClient.create(config);
+        setPdClient(pdClient);
+    }
+
+    public HgStoreClient(PDClient pdClient) {
+        this.sessionProvider = new HgStoreSessionProvider();
+        setPdClient(pdClient);
+    }
+
+    public static HgStoreClient create(PDConfig config) {
+        return new HgStoreClient(config);
+    }
+
+    public static HgStoreClient create(PDClient pdClient) {
+        return new HgStoreClient(pdClient);
+    }
+
+    public static HgStoreClient create() {
+        return new HgStoreClient();
+    }
+
+    public void setPDConfig(PDConfig config) {
+        pdClient = PDClient.create(config);
+        setPdClient(pdClient);
+    }
+
+    /**
+     * Retrieve or create a HgStoreSession.
+     *
+     * @param graphName
+     * @return
+     */
+    public HgStoreSession openSession(String graphName) {
+        return this.sessionProvider.createSession(graphName);
+    }
+
+    public PDClient getPdClient() {
+        return pdClient;
+    }
+
+    public void setPdClient(PDClient client) {
+        this.pdClient = client;
+        HgStoreNodeManager nodeManager =
+                HgStoreNodeManager.getInstance();
+
+        HgStoreNodePartitionerImpl p = new HgStoreNodePartitionerImpl(pdClient, nodeManager);
+        nodeManager.setNodeProvider(p);
+        nodeManager.setNodePartitioner(p);
+        nodeManager.setNodeNotifier(p);
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java
new file mode 100644
index 0000000..2e595e1
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+
+public interface HgStoreSession extends HgKvStore {
+
+    void beginTx();
+
+    /**
+     * @throws IllegalStateException  when the tx hasn't begun.
+     * @throws HgStoreClientException when the commit fails.
+     */
+    void commit();
+
+    /**
+     * @throws IllegalStateException  when the tx hasn't begun.
+     * @throws HgStoreClientException when the rollback fails.
+     */
+    void rollback();
+
+    boolean isTx();
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java
new file mode 100644
index 0000000..8e08ab6
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+public interface HgTkvEntry {
+
+    String table();
+
+    byte[] key();
+
+    byte[] value();
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java
new file mode 100644
index 0000000..57ca4d4
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+public interface HgTokvEntry {
+
+    String table();
+
+    HgOwnerKey ownerKey();
+
+    byte[] value();
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java
new file mode 100644
index 0000000..6fa354e
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Objects;
+
+/**
+ * Immutable Object Pattern
+ * <p>
+ * created on 2021/10/26
+ */
+public final class HgNodePartition {
+
+    private final Long nodeId;
+    // the hashcode of the current key
+    private final Integer keyCode;
+
+    // the start/end range of the partition
+    private final Integer startKey;
+    private final Integer endKey;
+    private int hash = -1;
+
+    HgNodePartition(Long nodeId, Integer keyCode) {
+        this.nodeId = nodeId;
+        this.keyCode = keyCode;
+        this.startKey = this.endKey = keyCode;
+    }
+
+    HgNodePartition(Long nodeId, Integer keyCode, Integer startKey, Integer endKey) {
+        this.nodeId = nodeId;
+        this.keyCode = keyCode;
+        this.startKey = startKey;
+        this.endKey = endKey;
+    }
+
+    public static HgNodePartition of(Long nodeId, Integer keyCode) {
+        return new HgNodePartition(nodeId, keyCode);
+    }
+
+    public static HgNodePartition of(Long nodeId, Integer keyCode, Integer startKey,
+                                     Integer endKey) {
+        return new HgNodePartition(nodeId, keyCode, startKey, endKey);
+    }
+
+    public Long getNodeId() {
+        return nodeId;
+    }
+
+    public Integer getKeyCode() {
+        return keyCode;
+    }
+
+    public Integer getStartKey() {
+        return startKey;
+    }
+
+    public Integer getEndKey() {
+        return endKey;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        HgNodePartition that = (HgNodePartition) o;
+        return Objects.equals(nodeId, that.nodeId) && Objects.equals(keyCode, that.keyCode);
+    }
+
+    @Override
+    public int hashCode() {
+        if (this.hash == -1) {
+            this.hash = Objects.hash(nodeId, keyCode);
+        }
+        return this.hash;
+    }
+
+    @Override
+    public String toString() {
+        return "HgNodePartition{" +
+               "nodeId=" + nodeId +
+               ", partitionId=" + keyCode +
+               '}';
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java
new file mode 100644
index 0000000..4bb0705
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import static org.apache.hugegraph.store.client.util.HgAssert.isFalse;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+/**
+ * created on 2021/10/26
+ *
+ * @version 1.0.0
+ */
+@NotThreadSafe
+public final class HgNodePartitionerBuilder {
+
+    private Set<HgNodePartition> partitions = null;
+
+    static HgNodePartitionerBuilder resetAndGet() {
+        return new HgNodePartitionerBuilder();
+    }
+
+    /**
+     * @param nodeId
+     * @param keyCode
+     * @return
+     * @see HgNodePartitionerBuilder#setPartitions(Set)
+     */
+    @Deprecated
+    public HgNodePartitionerBuilder add(Long nodeId, Integer keyCode) {
+        isFalse(nodeId == null, "The argument is invalid: nodeId");
+        isFalse(keyCode == null, "The argument is invalid: keyCode");
+
+        if (this.partitions == null) {
+            this.partitions = new HashSet<>(16, 1);
+        }
+
+        this.partitions.add(HgNodePartition.of(nodeId, keyCode));
+        return this;
+    }
+
+    Collection<HgNodePartition> getPartitions() {
+        return this.partitions;
+    }
+
+    public void setPartitions(Set<HgNodePartition> partitions) {
+        isFalse(partitions == null, "The argument is invalid: partitions");
+        this.partitions = partitions;
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java
new file mode 100644
index 0000000..ee73485
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+/**
+ * created on 2021/10/26
+ */
+public class HgPrivate {
+
+    private final static HgPrivate instance = new HgPrivate();
+
+    private HgPrivate() {
+
+    }
+
+    static HgPrivate getInstance() {
+        return instance;
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java
new file mode 100644
index 0000000..31438c0
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/11
+ *
+ * @version 0.2.0
+ */
+public interface HgStoreNode {
+
+    /**
+     * Return whether the store node is online (healthy).
+     *
+     * @return
+     */
+    default boolean isHealthy() {
+        return true;
+    }
+
+    /**
+     * Return the unique ID of the store node.
+     *
+     * @return
+     */
+    Long getNodeId();
+
+    /**
+     * Return the address as a "host:port" string.
+     *
+     * @return
+     */
+    String getAddress();
+
+    /**
+     * Return a new HgStoreSession instance, which is not thread-safe.
+     * Return null when the node is not in charge of the graph passed as the argument.
+     *
+     * @return
+     */
+    HgStoreSession openSession(String graphName);
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java
new file mode 100644
index 0000000..c35b5e9
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+/**
+ * created on 2021/10/11
+ */
+public interface HgStoreNodeBuilder {
+
+    HgStoreNodeBuilder setNodeId(Long nodeId);
+
+    HgStoreNodeBuilder setAddress(String address);
+
+    /**
+     * Build an HgStoreNode instance.
+     *
+     * @return the built node
+     */
+    HgStoreNode build();
+
+}
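
A registration sketch for reviewers, using only APIs added in this patch; the node ID, the address `127.0.0.1:8500`, and the graph name are illustrative:

```java
import org.apache.hugegraph.store.client.HgStoreNode;
import org.apache.hugegraph.store.client.HgStoreNodeManager;

class NodeRegistrationSketch {
    static HgStoreNode register() {
        HgStoreNodeManager manager = HgStoreNodeManager.getInstance();
        HgStoreNode node = manager.getNodeBuilder()              // gRPC-backed builder
                                  .setNodeId(1L)
                                  .setAddress("127.0.0.1:8500")  // "host:port"
                                  .build();
        return manager.addNode("hugegraph", node);               // bind the node to a graph
    }
}
```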
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java
new file mode 100644
index 0000000..d8735cd
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.List;
+
+/**
+ * created on 2021/10/12
+ */
+public final class HgStoreNodeCandidates {
+
+    List<HgStoreNode> nodeList;
+
+    HgStoreNodeCandidates(List<HgStoreNode> nodeList) {
+        this.nodeList = nodeList;
+    }
+
+    public int size() {
+        return this.nodeList.size();
+    }
+
+    public HgStoreNode getNode(int index) {
+        return this.nodeList.get(index);
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java
new file mode 100644
index 0000000..84709f1
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.client.grpc.GrpcStoreNodeBuilder;
+import org.apache.hugegraph.store.client.type.HgNodeStatus;
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * // TODO: Mapping to Store-Node-Cluster, one to one.
+ * <p>
+ * created on 2021/10/11
+ *
+ * @version 0.2.0
+ */
+@ThreadSafe
+@Slf4j
+public final class HgStoreNodeManager {
+
+    private final static Set<String> CLUSTER_ID_SET = new HashSet<>();
+    private final static HgStoreNodeManager instance = new HgStoreNodeManager();
+
+    private final String clusterId;
+    private final Map<String, HgStoreNode> addressMap = new ConcurrentHashMap<>();
+    private final Map<Long, HgStoreNode> nodeIdMap = new ConcurrentHashMap<>();
+    private final Map<String, List<HgStoreNode>> graphNodesMap = new ConcurrentHashMap<>();
+
+    private HgStoreNodeProvider nodeProvider;
+    private HgStoreNodePartitioner nodePartitioner;
+    private HgStoreNodeNotifier nodeNotifier;
+
+    private HgStoreNodeManager() {
+        this.clusterId = HgStoreClientConst.DEFAULT_NODE_CLUSTER_ID;
+    }
+
+    private HgStoreNodeManager(String clusterId) {
+        synchronized (CLUSTER_ID_SET) {
+            if (CLUSTER_ID_SET.contains(clusterId)) {
+                throw new RuntimeException("The cluster [" + clusterId + "] has been existing.");
+            }
+            CLUSTER_ID_SET.add(clusterId);
+            this.clusterId = clusterId;
+        }
+    }
+
+    public static HgStoreNodeManager getInstance() {
+        return instance;
+    }
+
+    /**
+     * Return an HgStoreNodeBuilder for constructing store-node instances.
+     *
+     * @return a new builder
+     */
+    public HgStoreNodeBuilder getNodeBuilder() {
+        // TODO: Constructed by a provider that retrieved by SPI
+        return new GrpcStoreNodeBuilder(this, HgPrivate.getInstance());
+    }
+
+    /**
+     * Return the HgStoreNode instance whose ID matches the argument.
+     *
+     * @param nodeId the node ID to look up
+     * @return the matched node, or null when no instance matches or the argument is invalid
+     */
+    public HgStoreNode getStoreNode(Long nodeId) {
+        if (nodeId == null) {
+            return null;
+        }
+        return this.nodeIdMap.get(nodeId);
+    }
+
+    /**
+     * Apply for an HgStoreNode instance by graph-name and node-id.
+     * <b>CAUTION:</b>
+     * <b>It won't work unless an HgStoreNodeProvider has been set via the setNodeProvider
+     * method.</b>
+     *
+     * @param graphName the name of the graph
+     * @param nodeId    the node ID
+     * @return the applied node, or null when it cannot be provided
+     */
+    HgStoreNode applyNode(String graphName, Long nodeId) {
+        HgStoreNode node = this.nodeIdMap.get(nodeId);
+
+        if (node != null) {
+            return node;
+        }
+
+        if (this.nodeProvider == null) {
+            return null;
+        }
+
+        node = this.nodeProvider.apply(graphName, nodeId);
+
+        if (node == null) {
+
+            log.warn("Failed to apply a HgStoreNode instance form the nodeProvider [ "
+                     + this.nodeProvider.getClass().getName() + " ].");
+            notifying(graphName, nodeId, HgNodeStatus.NOT_EXIST);
+            return null;
+        }
+
+        this.addNode(graphName, node);
+
+        return node;
+    }
+
+    private void notifying(String graphName, Long nodeId, HgNodeStatus status) {
+        if (this.nodeNotifier != null) {
+            try {
+                this.nodeNotifier.notice(graphName, HgStoreNotice.of(nodeId, status));
+            } catch (Throwable t) {
+                log.error("Failed to invoke " + this.nodeNotifier.getClass().getSimpleName() +
+                          ":notice(" + nodeId + "," + status + ")", t);
+            }
+        }
+    }
+
+    /**
+     * @param graphName the name of the graph
+     * @param notice    the notice to deliver
+     * @return the notifier's result, or null when there is no HgStoreNodeNotifier in the
+     * nodeManager
+     * @throws HgStoreClientException when the notifier fails
+     */
+    public Integer notifying(String graphName, HgStoreNotice notice) {
+
+        if (this.nodeNotifier != null) {
+
+            synchronized (Thread.currentThread()) {
+                try {
+                    return this.nodeNotifier.notice(graphName, notice);
+                } catch (Throwable t) {
+                    String msg =
+                            "Failed to invoke " + this.nodeNotifier.getClass().getSimpleName() +
+                            ", notice: [ " + notice + " ]";
+                    log.error(msg, t);
+                    throw new HgStoreClientException(msg);
+                }
+            }
+
+        }
+
+        return null;
+    }
+
+    /**
+     * Return the collection of HgStoreNodes that are in charge of the graph passed as the
+     * argument.
+     *
+     * @param graphName the name of the graph
+     * @return the nodes of the graph, or null when none match or the argument is invalid
+     */
+    public List<HgStoreNode> getStoreNodes(String graphName) {
+        if (HgAssert.isInvalid(graphName)) {
+            return null;
+        }
+
+        return this.graphNodesMap.get(graphName);
+    }
+
+    /**
+     * Add a new Store-Node; return the argument's value if its host:port did not exist yet,
+     * otherwise return the HgStoreNode instance that was added earlier.
+     *
+     * @param storeNode the node to add
+     * @return the registered node instance
+     * @throws IllegalArgumentException when any argument is invalid.
+     */
+    public HgStoreNode addNode(HgStoreNode storeNode) {
+        HgAssert.isFalse(storeNode == null, "the argument: storeNode is null.");
+
+        Long nodeId = storeNode.getNodeId();
+
+        HgStoreNode node = null;
+
+        synchronized (this.nodeIdMap) {
+            // Dedupe by address (host:port); reuse the previously added instance if present.
+            node = this.addressMap.get(storeNode.getAddress());
+            if (node == null) {
+                node = storeNode;
+                this.nodeIdMap.put(nodeId, node);
+                this.addressMap.put(storeNode.getAddress(), node);
+            }
+        }
+
+        return node;
+    }
+
+    /**
+     * @param graphName the name of the graph
+     * @param storeNode the node to add and bind to the graph
+     * @return the registered node instance
+     * @throws IllegalArgumentException when any argument is invalid.
+     */
+    public HgStoreNode addNode(String graphName, HgStoreNode storeNode) {
+        HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument is invalid: graphName");
+        HgStoreNode node = this.addNode(storeNode);
+
+        List<HgStoreNode> nodes = null;
+
+        synchronized (this.graphNodesMap) {
+            nodes = this.graphNodesMap.get(graphName);
+            if (nodes == null) {
+                nodes = new ArrayList<>();
+                this.graphNodesMap.put(graphName, nodes);
+            }
+            nodes.add(node);
+        }
+
+        return node;
+    }
+
+    public HgStoreNodePartitioner getNodePartitioner() {
+        return nodePartitioner;
+    }
+
+    public HgStoreNodeManager setNodePartitioner(HgStoreNodePartitioner nodePartitioner) {
+        HgAssert.isFalse(nodePartitioner == null, "the argument is invalid: nodePartitioner");
+        this.nodePartitioner = nodePartitioner;
+        return this;
+    }
+
+    public HgStoreNodeNotifier getNodeNotifier() {
+        return nodeNotifier;
+    }
+
+    public HgStoreNodeManager setNodeNotifier(HgStoreNodeNotifier nodeNotifier) {
+        HgAssert.isFalse(nodeNotifier == null, "the argument is invalid: nodeNotifier");
+        this.nodeNotifier = nodeNotifier;
+        return this;
+    }
+
+    public HgStoreNodeManager setNodeProvider(HgStoreNodeProvider nodeProvider) {
+        this.nodeProvider = nodeProvider;
+        return this;
+    }
+
+}
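
A lookup sketch showing how registered nodes can be retrieved per graph; the graph name is illustrative:

```java
import java.util.List;

import org.apache.hugegraph.store.client.HgStoreNode;
import org.apache.hugegraph.store.client.HgStoreNodeManager;

class NodeLookupSketch {
    static void printNodes() {
        // getStoreNodes returns null when the graph name is invalid or nothing is registered yet.
        List<HgStoreNode> nodes = HgStoreNodeManager.getInstance().getStoreNodes("hugegraph");
        if (nodes != null) {
            nodes.forEach(n -> System.out.println(n.getNodeId() + " -> " + n.getAddress()));
        }
    }
}
```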
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java
new file mode 100644
index 0000000..0319d6c
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+/**
+ * created on 2021/10/12
+ *
+ * @version 1.0.0
+ */
+public interface HgStoreNodeNotifier {
+
+    /**
+     * Invoked by the NodeManager when an exception or issue has occurred.
+     *
+     * @param graphName   the name of the graph
+     * @param storeNotice the notice describing the issue
+     * @return always return 0, no matter what.
+     */
+    int notice(String graphName, HgStoreNotice storeNotice);
+
+}
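
A minimal implementation sketch of this callback; the class name and the logging behavior are illustrative only:

```java
import org.apache.hugegraph.store.client.HgStoreNodeNotifier;
import org.apache.hugegraph.store.client.HgStoreNotice;

class LoggingNodeNotifier implements HgStoreNodeNotifier {

    @Override
    public int notice(String graphName, HgStoreNotice storeNotice) {
        // A real implementation would refresh routing caches or mark the node unhealthy.
        System.out.println("graph=" + graphName + ", notice=" + storeNotice);
        return 0; // the contract asks implementations to always return 0
    }
}
```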
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java
new file mode 100644
index 0000000..d540f68
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+
+/**
+ * created on 2021/10/12
+ *
+ * @version 1.0.0
+ */
+public interface HgStoreNodePartitioner {
+
+    /**
+     * The partition algorithm implementation, specialized by the user.
+     *
+     * @param builder   the HgNodePartitionerBuilder; it is supposed to be invoked
+     *                  directly by the user.
+     *                  <b>e.g. builder.add(nodeId,address,partitionId);</b>
+     * @param graphName the name of the graph
+     * @param startKey  the start owner key
+     * @param endKey    the end owner key
+     * @return status:
+     * <ul>
+     *     <li>0: The partitioner is OK.</li>
+     *     <li>10: The partitioner does not work.</li>
+     * </ul>
+     */
+    int partition(HgNodePartitionerBuilder builder, String graphName, byte[] startKey,
+                  byte[] endKey);
+
+    /**
+     * @param builder   the HgNodePartitionerBuilder
+     * @param graphName the name of the graph
+     * @param startCode start hash code
+     * @param endCode   end hash code
+     * @return status, see above
+     */
+    default int partition(HgNodePartitionerBuilder builder, String graphName, int startCode,
+                          int endCode) {
+        return this.partition(builder, graphName
+                , HgStoreClientConst.ALL_PARTITION_OWNER
+                , HgStoreClientConst.ALL_PARTITION_OWNER);
+    }
+
+    default int partition(HgNodePartitionerBuilder builder, String graphName, int partitionId) {
+        return this.partition(builder, graphName
+                , HgStoreClientConst.ALL_PARTITION_OWNER
+                , HgStoreClientConst.ALL_PARTITION_OWNER);
+    }
+}
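
A single-node partitioner sketch that routes every key range to one store; it assumes `HgNodePartition` and its `of(nodeId, keyCode)` factory are accessible to user code, as they are used by the PD-backed implementation below, and the node ID is illustrative:

```java
import java.util.HashSet;

import org.apache.hugegraph.store.client.HgNodePartition;
import org.apache.hugegraph.store.client.HgNodePartitionerBuilder;
import org.apache.hugegraph.store.client.HgStoreNodePartitioner;

class SingleNodePartitioner implements HgStoreNodePartitioner {

    private static final long NODE_ID = 1L; // illustrative: the only registered store node

    @Override
    public int partition(HgNodePartitionerBuilder builder, String graphName,
                         byte[] startKey, byte[] endKey) {
        HashSet<HgNodePartition> partitions = new HashSet<>(1);
        // -1: no specific key code, mirroring the PD-backed implementation below
        partitions.add(HgNodePartition.of(NODE_ID, -1));
        builder.setPartitions(partitions);
        return 0; // 0: the partitioner is OK
    }
}
```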
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java
new file mode 100644
index 0000000..dba939e
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.store.client.type.HgNodeStatus;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class HgStoreNodePartitionerImpl implements HgStoreNodePartitioner,
+                                                   HgStoreNodeProvider,
+                                                   HgStoreNodeNotifier {
+
+    private PDClient pdClient;
+    private HgStoreNodeManager nodeManager;
+
+    protected HgStoreNodePartitionerImpl() {
+    }
+
+    public HgStoreNodePartitionerImpl(PDClient pdClient, HgStoreNodeManager nodeManager) {
+        this.pdClient = pdClient;
+        this.nodeManager = nodeManager;
+    }
+
+    /**
+     * Query partition information; the result is returned via HgNodePartitionerBuilder.
+     */
+    @Override
+    public int partition(HgNodePartitionerBuilder builder, String graphName,
+                         byte[] startKey, byte[] endKey) {
+        try {
+            HashSet<HgNodePartition> partitions = null;
+            if (HgStoreClientConst.ALL_PARTITION_OWNER == startKey) {
+                List<Metapb.Store> stores = pdClient.getActiveStores(graphName);
+                partitions = new HashSet<>(stores.size());
+                for (Metapb.Store store : stores) {
+                    partitions.add(HgNodePartition.of(store.getId(), -1));
+                }
+
+            } else if (endKey == HgStoreClientConst.EMPTY_BYTES
+                       || startKey == endKey || Arrays.equals(startKey, endKey)) {
+                KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                        pdClient.getPartition(graphName, startKey);
+                Metapb.Shard leader = partShard.getValue();
+                partitions = new HashSet<>();
+                partitions.add(HgNodePartition.of(leader.getStoreId(),
+                                                  pdClient.keyToCode(graphName, startKey)));
+            } else {
+                log.warn(
+                        "StartOwnerkey is not equal to endOwnerkey, which is meaningless!!, It is" +
+                        " a error!!");
+                List<Metapb.Store> stores = pdClient.getActiveStores(graphName);
+                partitions = new HashSet<>(stores.size());
+                for (Metapb.Store store : stores) {
+                    partitions.add(HgNodePartition.of(store.getId(), -1));
+                }
+            }
+            builder.setPartitions(partitions);
+        } catch (PDException e) {
+            log.error("An error occurred while getting partition information :{}", e.getMessage());
+            throw new RuntimeException(e.getMessage(), e);
+        }
+        return 0;
+    }
+
+    @Override
+    public int partition(HgNodePartitionerBuilder builder, String graphName,
+                         int startKey, int endKey) {
+        try {
+            HashSet<HgNodePartition> partitions = new HashSet<>();
+            Metapb.Partition partition = null;
+            while ((partition == null || partition.getEndKey() < endKey)
+                   && startKey < PartitionUtils.MAX_VALUE) {
+                KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                        pdClient.getPartitionByCode(graphName, startKey);
+                if (partShard != null) {
+                    partition = partShard.getKey();
+                    Metapb.Shard leader = partShard.getValue();
+                    partitions.add(HgNodePartition.of(leader.getStoreId(), startKey,
+                                                      (int) partition.getStartKey(),
+                                                      (int) partition.getEndKey()));
+                    startKey = (int) partition.getEndKey();
+                } else {
+                    break;
+                }
+            }
+            builder.setPartitions(partitions);
+        } catch (PDException e) {
+            log.error("An error occurred while getting partition information :{}", e.getMessage());
+            throw new RuntimeException(e.getMessage(), e);
+        }
+        return 0;
+    }
+
+    @Override
+    public int partition(HgNodePartitionerBuilder builder, String graphName,
+                         int partitionId) {
+        try {
+            HashSet<HgNodePartition> partitions = new HashSet<>();
+            Metapb.Partition partition = null;
+
+            KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                    pdClient.getPartitionById(graphName, partitionId);
+            if (partShard != null) {
+                partition = partShard.getKey();
+                Metapb.Shard leader = partShard.getValue();
+                partitions.add(
+                        HgNodePartition.of(leader.getStoreId(), (int) partition.getStartKey()));
+            }
+            builder.setPartitions(partitions);
+        } catch (PDException e) {
+            log.error("An error occurred while getting partition information :{}", e.getMessage());
+            throw new RuntimeException(e.getMessage(), e);
+        }
+        return 0;
+    }
+
+    /**
+     * Query hg-store node information.
+     *
+     * @return the matched HgStoreNode
+     */
+    @Override
+    public HgStoreNode apply(String graphName, Long nodeId) {
+        try {
+            Metapb.Store store = pdClient.getStore(nodeId);
+            return nodeManager.getNodeBuilder().setNodeId(store.getId())
+                              .setAddress(store.getAddress()).build();
+        } catch (PDException e) {
+            throw new RuntimeException(e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Notify that the partition cache should be updated.
+     */
+    @Override
+    public int notice(String graphName, HgStoreNotice storeNotice) {
+        log.warn(storeNotice.toString());
+        if (storeNotice.getPartitionLeaders() != null) {
+            storeNotice.getPartitionLeaders().forEach((partId, leader) -> {
+                pdClient.updatePartitionLeader(graphName, partId, leader);
+                log.warn("updatePartitionLeader:{}-{}-{}",
+                         graphName, partId, leader);
+            });
+        }
+        if (storeNotice.getPartitionIds() != null) {
+            storeNotice.getPartitionIds().forEach(partId -> {
+                pdClient.invalidPartitionCache(graphName, partId);
+            });
+        }
+        if (!storeNotice.getNodeStatus().equals(
+                HgNodeStatus.PARTITION_COMMON_FAULT)
+            && !storeNotice.getNodeStatus().equals(
+                HgNodeStatus.NOT_PARTITION_LEADER)) {
+            pdClient.invalidPartitionCache();
+            log.warn("invalidPartitionCache:{} ", storeNotice.getNodeStatus());
+        }
+        return 0;
+    }
+
+    public Metapb.Graph delGraph(String graphName) {
+        try {
+            return pdClient.delGraph(graphName);
+        } catch (PDException e) {
+            log.error("delGraph {} exception, {}", graphName, e.getMessage());
+        }
+        return null;
+    }
+
+    public void setNodeManager(HgStoreNodeManager nodeManager) {
+        this.nodeManager = nodeManager;
+    }
+}
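
A wiring sketch: the PD-backed implementation covers all three roles (partitioner, node provider, notifier), so a single instance can be registered on the manager once a `PDClient` is available; obtaining the `PDClient` itself is out of scope here:

```java
import org.apache.hugegraph.pd.client.PDClient;
import org.apache.hugegraph.store.client.HgStoreNodeManager;
import org.apache.hugegraph.store.client.HgStoreNodePartitionerImpl;

class PdWiringSketch {
    static HgStoreNodeManager wire(PDClient pdClient) {
        HgStoreNodeManager manager = HgStoreNodeManager.getInstance();
        HgStoreNodePartitionerImpl partitioner =
                new HgStoreNodePartitionerImpl(pdClient, manager);
        // The same object serves as partitioner, node provider, and notifier.
        return manager.setNodePartitioner(partitioner)
                      .setNodeProvider(partitioner)
                      .setNodeNotifier(partitioner);
    }
}
```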
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java
new file mode 100644
index 0000000..2d0a7b5
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+/**
+ * created on 2021/10/27
+ */
+public interface HgStoreNodeProvider {
+
+    /**
+     * Apply for a new HgStoreNode instance.
+     *
+     * @param graphName the name of the graph
+     * @param nodeId    the node ID
+     * @return the applied node
+     */
+    HgStoreNode apply(String graphName, Long nodeId);
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java
new file mode 100644
index 0000000..17387ee
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/11
+ *
+ * @version 0.1.0
+ */
+public interface HgStoreNodeSession extends HgStoreSession {
+
+    /**
+     * Return the name of the graph.
+     *
+     * @return the graph name
+     */
+    String getGraphName();
+
+    /**
+     * Return the HgStoreNode instance that provides the connection to the Store-Node machine.
+     *
+     * @return the store node
+     */
+    HgStoreNode getStoreNode();
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java
new file mode 100644
index 0000000..083cb8d
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.List;
+import java.util.Map;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.client.type.HgNodeStatus;
+import org.apache.hugegraph.store.client.util.HgAssert;
+
+/**
+ * 2021/11/16
+ */
+@NotThreadSafe
+public class HgStoreNotice {
+
+    private final Long nodeId;
+    private final HgNodeStatus nodeStatus;
+    private final String message;
+    private Map<Integer, Long> partitionLeaders;
+    private List<Integer> partitionIds;
+
+    private HgStoreNotice(Long nodeId, HgNodeStatus nodeStatus, String message) {
+        this.nodeId = nodeId;
+        this.nodeStatus = nodeStatus;
+        this.message = message;
+    }
+
+    public static HgStoreNotice of(Long nodeId, HgNodeStatus nodeStatus) {
+        HgAssert.isArgumentNotNull(nodeId, "nodeId");
+        HgAssert.isArgumentNotNull(nodeStatus, "nodeStatus");
+        return new HgStoreNotice(nodeId, nodeStatus, "");
+    }
+
+    public static HgStoreNotice of(Long nodeId, HgNodeStatus nodeStatus, String message) {
+        HgAssert.isArgumentNotNull(nodeId, "nodeId");
+        HgAssert.isArgumentNotNull(nodeStatus, "nodeStatus");
+        HgAssert.isArgumentNotNull(message, "message");
+
+        return new HgStoreNotice(nodeId, nodeStatus, message);
+    }
+
+    public Long getNodeId() {
+        return nodeId;
+    }
+
+    public HgNodeStatus getNodeStatus() {
+        return nodeStatus;
+    }
+
+    public String getMessage() {
+        return message;
+    }
+
+    public Map<Integer, Long> getPartitionLeaders() {
+        return partitionLeaders;
+    }
+
+    public HgStoreNotice setPartitionLeaders(Map<Integer, Long> partitionLeaders) {
+        this.partitionLeaders = partitionLeaders;
+        return this;
+    }
+
+    public List<Integer> getPartitionIds() {
+        return partitionIds;
+    }
+
+    public HgStoreNotice setPartitionIds(List<Integer> partitionIds) {
+        this.partitionIds = partitionIds;
+        return this;
+    }
+
+    @Override
+    public String toString() {
+        return "HgStoreNotice{" +
+               "nodeId=" + nodeId +
+               ", nodeStatus=" + nodeStatus +
+               ", message='" + message + '\'' +
+               ", partitionLeaders=" + partitionLeaders +
+               ", partitionIds=" + partitionIds +
+               '}';
+    }
+}
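
A construction sketch: building a notice about a leader change and pushing it through the manager; the node ID, partition mapping, and graph name are illustrative:

```java
import java.util.Collections;

import org.apache.hugegraph.store.client.HgStoreNodeManager;
import org.apache.hugegraph.store.client.HgStoreNotice;
import org.apache.hugegraph.store.client.type.HgNodeStatus;

class NoticeSketch {
    static Integer reportLeaderChange() {
        HgStoreNotice notice = HgStoreNotice
                .of(1L, HgNodeStatus.NOT_PARTITION_LEADER, "leader moved")
                .setPartitionLeaders(Collections.singletonMap(0, 2L)); // partition 0 -> store 2
        // Returns null when no HgStoreNodeNotifier has been registered on the manager.
        return HgStoreNodeManager.getInstance().notifying("hugegraph", notice);
    }
}
```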
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java
new file mode 100644
index 0000000..c0e2be6
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+public class HgStoreService {
+
+    private static final HgStoreService instance = new HgStoreService();
+
+    private HgStoreService() {
+    }
+
+    static HgStoreService of() {
+        return instance;
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java
new file mode 100644
index 0000000..37fa51c
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgSessionProvider;
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/12
+ */
+@ThreadSafe
+public class HgStoreSessionProvider implements HgSessionProvider {
+
+    private final MultiNodeSessionFactory sessionFactory = MultiNodeSessionFactory.getInstance();
+
+    @Override
+    public HgStoreSession createSession(String graphName) {
+        return this.sessionFactory.createStoreSession(graphName);
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java
new file mode 100644
index 0000000..ab0c7fd
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+import org.apache.hugegraph.store.HgTkvEntry;
+
+/**
+ * created on 2021/10/14
+ */
+class HgTkvEntryImpl implements HgTkvEntry {
+
+    private final String table;
+    private final byte[] key;
+    private final byte[] value;
+
+    HgTkvEntryImpl(String table, byte[] key, byte[] value) {
+        this.table = table;
+        this.key = key;
+        this.value = value;
+    }
+
+    @Override
+    public String table() {
+        return this.table;
+    }
+
+    @Override
+    public byte[] key() {
+        return this.key;
+    }
+
+    @Override
+    public byte[] value() {
+        return this.value;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        HgTkvEntryImpl that = (HgTkvEntryImpl) o;
+        return Objects.equals(table, that.table) && Arrays.equals(key, that.key) &&
+               Arrays.equals(value, that.value);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = Objects.hash(table);
+        result = 31 * result + Arrays.hashCode(key);
+        result = 31 * result + Arrays.hashCode(value);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "HgTkvEntryImpl{" +
+               "table='" + table + '\'' +
+               ", key=" + Arrays.toString(key) +
+               ", value=" + Arrays.toString(value) +
+               '}';
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java
new file mode 100644
index 0000000..932864a
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgTokvEntry;
+
+/**
+ * created on 2021/10/14
+ */
+class HgTokvEntryImpl implements HgTokvEntry {
+
+    private final String table;
+    private final HgOwnerKey ownerKey;
+    private final byte[] value;
+
+    HgTokvEntryImpl(String table, HgOwnerKey ownerKey, byte[] value) {
+        this.table = table;
+        this.ownerKey = ownerKey;
+        this.value = value;
+    }
+
+    @Override
+    public String table() {
+        return this.table;
+    }
+
+    @Override
+    public HgOwnerKey ownerKey() {
+        return this.ownerKey;
+    }
+
+    @Override
+    public byte[] value() {
+        return this.value;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        HgTokvEntryImpl that = (HgTokvEntryImpl) o;
+        return Objects.equals(table, that.table) && Objects.equals(ownerKey, that.ownerKey) &&
+               Arrays.equals(value, that.value);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = Objects.hash(table, ownerKey);
+        result = 31 * result + Arrays.hashCode(value);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "HgTokvEntryImpl{" +
+               "table='" + table + '\'' +
+               ", okv=" + ownerKey +
+               ", value=" + Arrays.toString(value) +
+               '}';
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java
new file mode 100644
index 0000000..ff7cde0
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/12
+ */
+@ThreadSafe
+public final class MultiNodeSessionFactory {
+
+    // TODO multi-instance ?
+    private final static MultiNodeSessionFactory INSTANCE = new MultiNodeSessionFactory();
+    // TODO multi-instance ?
+    private final HgStoreNodeManager nodeManager = HgStoreNodeManager.getInstance();
+    // TODO: to be a chain assigned to each graph
+    //private HgStoreNodeDispatcher storeNodeDispatcher;
+
+    private MultiNodeSessionFactory() {
+    }
+
+    static MultiNodeSessionFactory getInstance() {
+        return INSTANCE;
+    }
+
+    HgStoreSession createStoreSession(String graphName) {
+        return buildProxy(graphName);
+    }
+
+    private HgStoreSession buildProxy(String graphName) {
+        //return new MultiNodeSessionProxy(graphName, nodeManager, storeNodeDispatcher);
+        //return new NodePartitionSessionProxy(graphName,nodeManager);
+        //return new NodeRetrySessionProxy(graphName,nodeManager);
+        return new NodeTxSessionProxy(graphName, nodeManager);
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java
new file mode 100644
index 0000000..e78ced4
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Objects;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/26
+ */
+@ThreadSafe
+class NodeTkv {
+
+    private final HgNodePartition nodePartition;
+    private final String table;
+    private final HgOwnerKey key;
+    private final HgOwnerKey endKey;
+    private HgStoreSession session;
+
+    NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey key) {
+        this.nodePartition = nodePartition;
+        this.table = table;
+        this.key = key;
+        this.endKey = key;
+        this.key.setKeyCode(this.nodePartition.getKeyCode());
+    }
+
+    NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey key, int keyCode) {
+        this.nodePartition = nodePartition;
+        this.table = table;
+        this.key = key;
+        this.endKey = key;
+
+        this.key.setKeyCode(keyCode);
+    }
+
+    NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey startKey,
+            HgOwnerKey endKey) {
+        this.nodePartition = nodePartition;
+        this.table = table;
+        this.key = startKey;
+        this.endKey = endKey;
+        this.key.setKeyCode(nodePartition.getStartKey());
+        this.endKey.setKeyCode(nodePartition.getEndKey());
+    }
+
+    public Long getNodeId() {
+        return this.nodePartition.getNodeId();
+    }
+
+    public String getTable() {
+        return table;
+    }
+
+    public HgOwnerKey getKey() {
+        return key;
+    }
+
+    public HgOwnerKey getEndKey() {
+        return endKey;
+    }
+
+    public NodeTkv setKeyCode(int code) {
+        this.key.setKeyCode(code);
+        return this;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        NodeTkv nptKv = (NodeTkv) o;
+        return Objects.equals(nodePartition, nptKv.nodePartition) &&
+               Objects.equals(table, nptKv.table)
+               && Objects.equals(key, nptKv.key)
+               && Objects.equals(endKey, nptKv.endKey);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = Objects.hash(nodePartition, table, key, endKey);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "NptKv{" +
+               "nodePartition=" + nodePartition +
+               ", table='" + table + '\'' +
+               ", key=" + key +
+               ", endKey=" + endKey +
+               '}';
+    }
+
+    public HgStoreSession getSession() {
+        return session;
+    }
+
+    public void setSession(HgStoreSession session) {
+        this.session = session;
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java
new file mode 100644
index 0000000..01eea1a
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java
@@ -0,0 +1,431 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_LIST;
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.NODE_MAX_RETRYING_TIMES;
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.TX_SESSIONS_MAP_CAPACITY;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collector;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgStoreSession;
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.term.HgPair;
+import org.apache.hugegraph.store.term.HgTriple;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * 2021/11/18
+ */
+@Slf4j
+@NotThreadSafe
+final class NodeTxExecutor {
+
+    private static final String maxTryMsg =
+            "the number of retries reached the upper limit : " + NODE_MAX_RETRYING_TIMES +
+            ",caused by:";
+    private static final String msg =
+            "Not all tx-data delivered to real-node-session successfully.";
+
+    static {
+        System.setProperty("java.util.concurrent.ForkJoinPool.common.parallelism",
+                           String.valueOf(Runtime.getRuntime().availableProcessors() * 2));
+    }
+
+    private final String graphName;
+    NodeTxSessionProxy proxy;
+    Collector<NodeTkv, ?, Map<Long, List<HgOwnerKey>>> collector = Collectors.groupingBy(
+            nkv -> nkv.getNodeId(), Collectors.mapping(NodeTkv::getKey, Collectors.toList()));
+    private Map<Long, HgStoreSession> sessions = new HashMap<>(TX_SESSIONS_MAP_CAPACITY, 1);
+    private boolean isTx;
+    private List<HgPair<HgTriple<String, HgOwnerKey, Object>,
+            Function<NodeTkv, Boolean>>> entries = new LinkedList<>();
+
+    private NodeTxExecutor(String graphName, NodeTxSessionProxy proxy) {
+        this.graphName = graphName;
+        this.proxy = proxy;
+    }
+
+    static NodeTxExecutor graphOf(String graphName, NodeTxSessionProxy proxy) {
+        return new NodeTxExecutor(graphName, proxy);
+    }
+
+    public boolean isTx() {
+        return isTx;
+    }
+
+    void setTx(boolean tx) {
+        isTx = tx;
+    }
+
+    void commitTx() {
+        if (!this.isTx) {
+            throw new IllegalStateException("It's not in tx state");
+        }
+
+        this.doCommit();
+    }
+
+    void rollbackTx() {
+        if (!this.isTx) {
+            return;
+        }
+        try {
+            this.sessions.values().stream().filter(HgStoreSession::isTx)
+                         .forEach(HgStoreSession::rollback);
+        } catch (Throwable t) {
+            throw t;
+        } finally {
+            this.isTx = false;
+            this.sessions.clear();
+        }
+    }
+
+    void doCommit() {
+        try {
+            this.retryingInvoke(() -> {
+                if (this.entries.isEmpty()) {
+                    return true;
+                }
+                AtomicBoolean allSuccess = new AtomicBoolean(true);
+                for (HgPair<HgTriple<String, HgOwnerKey, Object>, Function<NodeTkv, Boolean>> e :
+                        this.entries) {
+                    // Record failures so that the whole batch is aborted below.
+                    if (!doAction(e.getKey(), e.getValue())) {
+                        allSuccess.set(false);
+                    }
+                }
+                if (!allSuccess.get()) {
+                    throw HgStoreClientException.of(msg);
+                }
+                AtomicReference<Throwable> throwable = new AtomicReference<>();
+                Collection<HgStoreSession> sessions = this.sessions.values();
+                sessions.parallelStream().forEach(e -> {
+                    if (e.isTx()) {
+                        try {
+                            e.commit();
+                        } catch (Throwable t) {
+                            throwable.compareAndSet(null, t);
+                            allSuccess.set(false);
+                        }
+                    }
+                });
+                if (!allSuccess.get()) {
+                    if (isTx) {
+                        try {
+                            sessions.stream().forEach(HgStoreSession::rollback);
+                        } catch (Exception e) {
+                            // Ignore rollback failures; the original cause is rethrown below.
+                        }
+                    }
+                    Throwable cause = throwable.get();
+                    if (cause.getCause() != null) {
+                        cause = cause.getCause();
+                    }
+                    if (cause instanceof HgStoreClientException) {
+                        throw (HgStoreClientException) cause;
+                    }
+                    throw HgStoreClientException.of(cause);
+                }
+                return true;
+            });
+
+        } catch (Throwable t) {
+            throw t;
+        } finally {
+            this.isTx = false;
+            this.entries = new LinkedList<>();
+            this.sessions = new HashMap<>(TX_SESSIONS_MAP_CAPACITY, 1);
+        }
+    }
+
+    // private Function<HgTriple<String, HgOwnerKey, Object>,
+    //        List<HgPair<HgStoreNode, NodeTkv>>> nodeStreamWrapper = nodeParams -> {
+    //    if (nodeParams.getZ() == null) {
+    //        return this.proxy.getNode(nodeParams.getX(),
+    //                                  nodeParams.getY());
+    //    } else {
+    //        if (nodeParams.getZ() instanceof HgOwnerKey) {
+    //            return this.proxy.getNode(nodeParams.getX(),
+    //                                      nodeParams.getY(),
+    //                                      (HgOwnerKey) nodeParams.getZ());
+    //        } if ( nodeParams.getZ() instanceof Integer ){
+    //            return this.proxy.doPartition(nodeParams.getX(), (Integer) nodeParams.getZ())
+    //                             .stream()
+    //                             .map(e -> new NodeTkv(e, nodeParams.getX(), nodeParams.getY(),
+    //                             nodeParams.getY()
+    //                             .getKeyCode()))
+    //                             .map(
+    //                                     e -> new HgPair<>(this.proxy.getStoreNode(e.getNodeId
+    //                                     ()), e)
+    //                                 );
+    //        }else {
+    //            HgAssert.isTrue(nodeParams.getZ() instanceof byte[],
+    //                            "Illegal parameter to get node id");
+    //            throw new NotImplementedException();
+    //        }
+    //    }
+    // };
+
+    // private Function<HgTriple<String, HgOwnerKey, Object>,
+    //        List<HgPair<HgStoreNode, NodeTkv>>> nodeStreamWrapper = nodeParams -> {
+    //    if (nodeParams.getZ() == null) {
+    //        return this.proxy.getNode(nodeParams.getX(), nodeParams.getY());
+    //    } else {
+    //        if (nodeParams.getZ() instanceof HgOwnerKey) {
+    //            return this.proxy.getNode(nodeParams.getX(), nodeParams.getY(),
+    //                                      (HgOwnerKey) nodeParams.getZ());
+    //        }
+    //        if (nodeParams.getZ() instanceof Integer) {
+    //            Collection<HgNodePartition> nodePartitions = this.proxy.doPartition(nodeParams
+    //            .getX(),
+    //                                                                                (Integer)
+    //                                                                                nodeParams
+    //                                                                                .getZ());
+    //            ArrayList<HgPair<HgStoreNode, NodeTkv>> hgPairs = new ArrayList<>
+    //            (nodePartitions.size());
+    //            for (HgNodePartition nodePartition : nodePartitions) {
+    //                NodeTkv nodeTkv = new NodeTkv(nodePartition, nodeParams.getX(), nodeParams
+    //                .getY(),
+    //                                              nodeParams.getY().getKeyCode());
+    //                hgPairs.add(new HgPair<>(this.proxy.getStoreNode(nodeTkv.getNodeId()),
+    //                nodeTkv));
+    //
+    //            }
+    //            return hgPairs;
+    //        } else {
+    //            HgAssert.isTrue(nodeParams.getZ() instanceof byte[], "Illegal parameter to get
+    //            node id");
+    //            throw new RuntimeException("not implemented");
+    //        }
+    //    }
+    // };
+
+    private boolean doAction(HgTriple<String, HgOwnerKey, Object> nodeParams,
+                             Function<NodeTkv, Boolean> action) {
+        if (nodeParams.getZ() == null) {
+            return this.proxy.doAction(nodeParams.getX(), nodeParams.getY(), nodeParams.getY(),
+                                       action);
+        } else {
+            if (nodeParams.getZ() instanceof HgOwnerKey) {
+                boolean result = this.proxy.doAction(nodeParams.getX(), nodeParams.getY(),
+                                                     (HgOwnerKey) nodeParams.getZ(), action);
+                return result;
+            }
+            if (nodeParams.getZ() instanceof Integer) {
+                return this.proxy.doAction(nodeParams.getX(), nodeParams.getY(),
+                                           (Integer) nodeParams.getZ(), action);
+            } else {
+                HgAssert.isTrue(nodeParams.getZ() instanceof byte[],
+                                "Illegal parameter to get node id");
+                throw new RuntimeException("not implemented");
+            }
+        }
+    }
+
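+    /**
+     * In transaction mode, buffer the operation until commit; otherwise execute it right away
+     * and return whether it succeeded on all target nodes.
+     */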
+    boolean prepareTx(HgTriple<String, HgOwnerKey, Object> nodeParams,
+                      Function<NodeTkv, Boolean> sessionMapper) {
+        if (this.isTx) {
+            return this.entries.add(new HgPair(nodeParams, sessionMapper));
+        } else {
+            return this.isAllTrue(nodeParams, sessionMapper);
+        }
+    }
+
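+    // Cache one session per node id; begin a transaction on it when this executor is in tx mode.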
+    public HgStoreSession openNodeSession(HgStoreNode node) {
+        HgStoreSession res = this.sessions.get(node.getNodeId());
+        if (res == null) {
+            this.sessions.put(node.getNodeId(), (res = node.openSession(this.graphName)));
+        }
+        if (this.isTx) {
+            res.beginTx();
+        }
+
+        return res;
+    }
+
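+    /**
+     * Query the candidate nodes in parallel and return the first valid (non-empty) result,
+     * falling back to {@code emptyObj} when no node returns one.
+     */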
+    <R> R limitOne(
+            Supplier<Stream<HgPair<HgStoreNode, NodeTkv>>> nodeStreamSupplier,
+            Function<SessionData<NodeTkv>, R> sessionMapper, R emptyObj) {
+
+        Optional<R> res = retryingInvoke(
+                () -> nodeStreamSupplier.get()
+                                        .parallel()
+                                        .map(
+                                                pair -> new SessionData<NodeTkv>(
+                                                        openNodeSession(pair.getKey()),
+                                                        pair.getValue())
+                                        ).map(sessionMapper)
+                                        .filter(
+                                                r -> isValid(r)
+                                        )
+                                        .findAny()
+                                        .orElseGet(() -> emptyObj)
+        );
+        return res.orElse(emptyObj);
+    }
+
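+    /**
+     * Expand each key to its target nodes, group the expanded keys by node, query each node
+     * once with its batch of keys, and merge the per-node results into a single list.
+     */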
+    <R> List<R> toList(Function<Long, HgStoreNode> nodeFunction
+            , List<HgOwnerKey> keyList
+            , Function<HgOwnerKey, Stream<NodeTkv>> flatMapper
+            , Function<SessionData<List<HgOwnerKey>>, List<R>> sessionMapper) {
+        Optional<List<R>> res = retryingInvoke(
+                () -> keyList.stream()
+                             .flatMap(flatMapper)
+                             .collect(collector)
+                             .entrySet()
+                             .stream()
+                             .map(
+                                     e -> new SessionData<>
+                                             (
+                                                     openNodeSession(
+                                                             nodeFunction.apply(e.getKey())),
+                                                     e.getValue()
+                                             )
+                             )
+                             .parallel()
+                             .map(sessionMapper)
+                             .flatMap(
+                                     e -> e.stream()
+                             )
+                             //.distinct()
+                             .collect(Collectors.toList())
+        );
+
+        return res.orElse(EMPTY_LIST);
+    }
+
+    private boolean isAllTrue(HgTriple<String, HgOwnerKey, Object> nodeParams,
+                              Function<NodeTkv, Boolean> action) {
+        Optional<Boolean> res = retryingInvoke(() -> doAction(nodeParams, action));
+        return res.orElse(false);
+    }
+
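+    /**
+     * Apply the action to every target node in parallel; true only when all nodes succeed.
+     */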
+    boolean isAllTrue(Supplier<Stream<HgPair<HgStoreNode, NodeTkv>>> dataSource,
+                      Function<SessionData<NodeTkv>, Boolean> action) {
+        Optional<Boolean> res = retryingInvoke(
+                () -> dataSource.get()
+                                .parallel()
+                                .map(
+                                        pair -> new SessionData<NodeTkv>(
+                                                openNodeSession(pair.getKey()),
+                                                pair.getValue())
+                                ).map(action)
+                                .allMatch(Boolean::booleanValue)
+        );
+
+        return res.orElse(false);
+    }
+
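+    /**
+     * Apply the mapper to every target node in parallel; true when at least one node succeeds.
+     */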
+    boolean ifAnyTrue(Supplier<Stream<HgPair<HgStoreNode, NodeTkv>>> nodeStreamSupplier
+            , Function<SessionData<NodeTkv>, Boolean> sessionMapper) {
+
+        Optional<Boolean> res = retryingInvoke(
+                () -> nodeStreamSupplier.get()
+                                        .parallel()
+                                        .map(
+                                                pair -> new SessionData<NodeTkv>(
+                                                        openNodeSession(pair.getKey()),
+                                                        pair.getValue())
+                                        )
+                                        .map(sessionMapper)
+                                        .anyMatch(Boolean::booleanValue)
+        );
+
+        return res.orElse(false);
+    }
+
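+    /**
+     * Invoke the supplier with up to NODE_MAX_RETRYING_TIMES retries: wait one second between
+     * the first three attempts, then progressively longer, and throw an HgStoreClientException
+     * once the retries are exhausted.
+     */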
+    <T> Optional<T> retryingInvoke(Supplier<T> supplier) {
+        return IntStream.rangeClosed(0, NODE_MAX_RETRYING_TIMES).boxed()
+                        .map(
+                                i -> {
+                                    T buffer = null;
+                                    try {
+                                        buffer = supplier.get();
+                                    } catch (Throwable t) {
+                                        if (i + 1 <= NODE_MAX_RETRYING_TIMES) {
+                                            try {
+                                                int sleepTime;
+                                                // Retry once per second for the first three attempts
+                                                if (i < 3) {
+                                                    sleepTime = 1;
+                                                } else {
+                                                    // After that, increase the wait time with each attempt
+                                                    sleepTime = i - 1;
+                                                }
+                                                log.info("Waiting {} seconds " +
+                                                         "for the next try.",
+                                                         sleepTime);
+                                                Thread.sleep(sleepTime * 1000L);
+                                            } catch (InterruptedException e) {
+                                                log.error("Failed to sleep", e);
+                                            }
+                                        } else {
+                                            log.error(maxTryMsg, t);
+                                            throw HgStoreClientException.of(
+                                                    t.getMessage(), t);
+                                        }
+                                    }
+                                    return buffer;
+                                }
+                        )
+                        .filter(e -> e != null)
+                        .findFirst();
+
+    }
+
+    private boolean isValid(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+
+        if (HgStoreClientConst.EMPTY_BYTES.equals(obj)) {
+            return false;
+        }
+
+        return !EMPTY_LIST.equals(obj);
+    }
+
+    class SessionData<T> {
+
+        HgStoreSession session;
+        T data;
+
+        SessionData(HgStoreSession session, T data) {
+            this.session = session;
+            this.data = data;
+        }
+
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java
new file mode 100644
index 0000000..066f968
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java
@@ -0,0 +1,887 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import static java.util.stream.Collectors.groupingBy;
+import static org.apache.hugegraph.store.client.util.HgAssert.isArgumentNotNull;
+import static org.apache.hugegraph.store.client.util.HgAssert.isArgumentValid;
+import static org.apache.hugegraph.store.client.util.HgAssert.isFalse;
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_STRING;
+import static org.apache.hugegraph.store.client.util.HgStoreClientUtil.err;
+import static org.apache.hugegraph.store.client.util.HgStoreClientUtil.toStr;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvOrderedIterator;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.HgStoreSession;
+import org.apache.hugegraph.store.client.grpc.KvBatchScanner;
+import org.apache.hugegraph.store.client.grpc.KvCloseableIterator;
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq.Builder;
+import org.apache.hugegraph.store.term.HgPair;
+import org.apache.hugegraph.store.term.HgTriple;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * A {@link HgStoreSession} proxy that resolves every operation to its owning store nodes via
+ * the node partitioner and executes it through {@link NodeTxExecutor}, optionally within a
+ * transaction.
+ * <p>
+ * created on 2022/01/19
+ *
+ * @version 0.6.0 added batch scan on 2022/03/03
+ */
+@Slf4j
+@NotThreadSafe
+class NodeTxSessionProxy implements HgStoreSession {
+
+    private final HgStoreNodeManager nodeManager;
+    private final HgStoreNodePartitioner nodePartitioner;
+    private final String graphName;
+    private final NodeTxExecutor txExecutor;
+
+    NodeTxSessionProxy(String graphName, HgStoreNodeManager nodeManager) {
+        this.nodeManager = nodeManager;
+        this.graphName = graphName;
+        this.nodePartitioner = this.nodeManager.getNodePartitioner();
+        this.txExecutor = NodeTxExecutor.graphOf(this.graphName, this);
+
+        isFalse(this.nodePartitioner == null,
+                "Failed to retrieve the node-partitioner from node-manager.");
+    }
+
+    @Override
+    public void beginTx() {
+        this.txExecutor.setTx(true);
+    }
+
+    @Override
+    public void commit() {
+        this.txExecutor.commitTx();
+    }
+
+    @Override
+    public void rollback() {
+        this.txExecutor.rollbackTx();
+    }
+
+    @Override
+    public boolean isTx() {
+        return this.txExecutor.isTx();
+    }
+
+    @Override
+    public boolean put(String table, HgOwnerKey ownerKey, byte[] value) {
+        // isArgumentValid(table, "table");
+        // isArgumentNotNull(ownerKey, "ownerKey");
+        // log.info("put -> graph: {}, table: {}, key: {}, value: {}",
+        // graphName, table, ownerKey, toByteStr(value));
+        // return this.txExecutor.prepareTx(
+        //        () -> getNodeStream(table, ownerKey),
+        //        e -> e.session.put(table, e.data.getKey(), value)
+        // );
+        return this.txExecutor.prepareTx(new HgTriple(table, ownerKey, null),
+                                         e -> e.getSession().put(table,
+                                                                 e.getKey(),
+                                                                 value));
+    }
+
+    @Override
+    public boolean directPut(String table, int partitionId, HgOwnerKey ownerKey, byte[] value) {
+        isArgumentValid(table, "table");
+        isArgumentNotNull(ownerKey, "ownerKey");
+
+        return this.txExecutor.prepareTx(
+                new HgTriple(table, ownerKey, partitionId),
+                e -> e.getSession().put(table, e.getKey(), value)
+        );
+    }
+
+    @Override
+    public boolean delete(String table, HgOwnerKey ownerKey) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(ownerKey == null, "The argument is invalid: ownerKey");
+
+        if (log.isDebugEnabled()) {
+            log.debug("delete -> graph: {}, table: {}, key: {}"
+                    , graphName, table, toStr(ownerKey));
+        }
+
+        return this.txExecutor
+                .prepareTx(
+                        new HgTriple(table, ownerKey, null),
+                        e -> e.getSession().delete(table, e.getKey())
+                );
+    }
+
+    @Override
+    public boolean deleteSingle(String table, HgOwnerKey ownerKey) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(ownerKey == null, "The argument is invalid: ownerKey");
+
+        if (log.isDebugEnabled()) {
+            log.debug("deleteSingle -> graph: {}, table: {}, key: {}"
+                    , graphName, table, toStr(ownerKey));
+        }
+
+        return this.txExecutor
+                .prepareTx(
+                        new HgTriple(table, ownerKey, null),
+                        e -> e.getSession().deleteSingle(table, e.getKey())
+                );
+    }
+
+    @Override
+    public boolean deletePrefix(String table, HgOwnerKey prefix) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(prefix == null, "The argument is invalid: prefix");
+
+        if (log.isDebugEnabled()) {
+            log.debug("deletePrefix -> graph: {}, table: {}, prefix: {}"
+                    , graphName, table, toStr(prefix));
+        }
+
+        return this.txExecutor
+                .prepareTx(
+                        new HgTriple(table, prefix, null),
+                        e -> e.getSession().deletePrefix(table, e.getKey())
+                );
+    }
+
+    @Override
+    public boolean deleteRange(String table, HgOwnerKey start, HgOwnerKey end) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(start == null, "The argument is invalid: start");
+        HgAssert.isFalse(end == null, "The argument is invalid: end");
+
+        if (log.isDebugEnabled()) {
+            log.debug("deleteRange -> graph: {}, table: {}, start: {}, end: {}"
+                    , graphName, table, toStr(start), toStr(end));
+        }
+
+        return this.txExecutor
+                .prepareTx(
+                        new HgTriple(table, start, end),
+                        e -> e.getSession().deleteRange(table, e.getKey(), e.getEndKey())
+                );
+    }
+
+    @Override
+    public boolean merge(String table, HgOwnerKey key, byte[] value) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(key == null, "The argument is invalid: key");
+        HgAssert.isFalse(value == null, "The argument is invalid: value");
+
+        if (log.isDebugEnabled()) {
+            log.debug("merge -> graph: {}, table: {}, key: {}, value: {}"
+                    , graphName, table, toStr(key), toStr(value));
+        }
+
+        return this.txExecutor
+                .prepareTx(
+                        new HgTriple(table, key, value),
+                        e -> e.getSession().merge(table, e.getKey(), value)
+                );
+    }
+
+    /*--- tx end ---*/
+
+    @Override
+    public byte[] get(String table, HgOwnerKey ownerKey) {
+        isArgumentValid(table, "table");
+        isArgumentNotNull(ownerKey, "ownerKey");
+
+        return this.txExecutor
+                .limitOne(
+                        () -> this.getNodeStream(table, ownerKey),
+                        e -> e.session.get(table, e.data.getKey()), HgStoreClientConst.EMPTY_BYTES
+                );
+    }
+
+    @Override
+    public boolean clean(int partId) {
+        Collection<HgNodePartition> nodes = this.doPartition("", partId);
+        // Clean the partition on every owning node; succeed only when all of them succeed.
+        return nodes.parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .clean(partId)
+                    ).reduce(true, Boolean::logicalAnd);
+    }
+
+    @Override
+    @Deprecated
+    public List<HgKvEntry> batchGetOwner(String table, List<HgOwnerKey> keyList) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(HgAssert.isInvalid(keyList), "The argument is invalid: keyList");
+
+        return this.txExecutor
+                .toList(
+                        (l) -> this.getStoreNode(l),
+                        keyList,
+                        key -> this.toNodeTkvList(table, key, key).stream(),
+                        e -> e.session.batchGetOwner(table, e.data)
+                );
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> batchPrefix(String table, List<HgOwnerKey> keyList) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(HgAssert.isInvalid(keyList), "The argument is invalid: keyList");
+        return this.toHgKvIteratorProxy(
+                this.txExecutor
+                        .toList(
+                                (l) -> this.getStoreNode(l),
+                                keyList,
+                                key -> this.toNodeTkvList(table, key, key).stream(),
+                                e -> Collections.singletonList(e.session.batchPrefix(table, e.data))
+                        )
+                , Long.MAX_VALUE);
+    }
+
+    @Override
+    public boolean truncate() {
+        return this.txExecutor
+                .isAllTrue(
+                        () -> this.getNodeStream(EMPTY_STRING),
+                        e -> e.session.truncate()
+                );
+    }
+
+    @Override
+    public boolean existsTable(String table) {
+        return this.txExecutor
+                .ifAnyTrue(
+                        () -> this.getNodeStream(EMPTY_STRING),
+                        e -> e.session.existsTable(table)
+                );
+    }
+
+    @Override
+    public boolean createTable(String table) {
+        return this.txExecutor
+                .isAllTrue(
+                        () -> this.getNodeStream(EMPTY_STRING),
+                        e -> e.session.createTable(table)
+                );
+    }
+
+    @Override
+    public boolean deleteTable(String table) {
+        return this.txExecutor
+                .isAllTrue(
+                        () -> this.getNodeStream(EMPTY_STRING),
+                        e -> e.session.deleteTable(table)
+                );
+    }
+
+    @Override
+    public boolean dropTable(String table) {
+        return this.txExecutor
+                .isAllTrue(
+                        () -> this.getNodeStream(table),
+                        e -> e.session.dropTable(table)
+                );
+    }
+
+    @Override
+    public boolean deleteGraph(String graph) {
+        return this.txExecutor
+                .isAllTrue(
+                        () -> this.getNodeStream(EMPTY_STRING),
+                        e -> e.session.deleteGraph(graph)
+                );
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table) {
+        return scanIterator(table, 0);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, byte[] query) {
+        return scanIterator(table, 0, query);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, long limit) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+
+        return this.toHgKvIteratorProxy(
+                this.toNodeTkvList(table)
+                    .parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .scanIterator(e.getTable(), limit)
+                    )
+                    .collect(Collectors.toList())
+                , limit);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, long limit, byte[] query) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+
+        return this.toHgKvIteratorProxy(
+                this.toNodeTkvList(table)
+                    .parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .scanIterator(e.getTable(), e.getKey(), limit, query)
+                    )
+                    .collect(Collectors.toList())
+                , limit);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix) {
+        return scanIterator(table, keyPrefix, 0);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix, long limit) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(keyPrefix == null, "The argument is invalid: keyPrefix");
+
+        return this.toHgKvIteratorProxy(
+                this.toNodeTkvList(table, keyPrefix)
+                    .parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .scanIterator(e.getTable(), e.getKey(), limit)
+                    )
+                    .collect(Collectors.toList())
+                , limit);
+
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix, long limit,
+                                                byte[] query) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(keyPrefix == null, "The argument is invalid: keyPrefix");
+
+        return this.toHgKvIteratorProxy(
+                this.toNodeTkvList(table, keyPrefix)
+                    .parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .scanIterator(e.getTable(), e.getKey(), limit, query)
+                    )
+                    .collect(Collectors.toList())
+                , limit);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey,
+                                                HgOwnerKey endKey) {
+        return this.scanIterator(table, startKey, endKey, 0, null);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey,
+                                                HgOwnerKey endKey, long limit) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(startKey == null, "The argument is invalid: startKey");
+        HgAssert.isFalse(endKey == null, "The argument is invalid: endKey");
+
+        return this.toHgKvIteratorProxy(
+                this.toNodeTkvList(table, startKey, endKey)
+                    .parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit)
+                    )
+                    .collect(Collectors.toList())
+                , limit);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey
+            , long limit, byte[] query) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(startKey == null, "The argument is invalid: startKey");
+        HgAssert.isFalse(endKey == null, "The argument is invalid: endKey");
+
+        return this.toHgKvIteratorProxy(
+                this.toNodeTkvList(table, startKey, endKey)
+                    .parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit,
+                                                   query)
+                    )
+                    .collect(Collectors.toList())
+                , limit);
+
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey
+            , long limit, int scanType, byte[] query) {
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        HgAssert.isFalse(startKey == null, "The argument is invalid: startKey");
+        HgAssert.isFalse(endKey == null, "The argument is invalid: endKey");
+
+        return this.toHgKvIteratorProxy(
+                this.toNodeTkvList(table, startKey, endKey)
+                    .parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit,
+                                                   scanType, query)
+                    )
+                    .collect(Collectors.toList())
+                , limit);
+
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, int codeFrom, int codeTo,
+                                                int scanType, byte[] query) {
+        if (log.isDebugEnabled()) {
+            log.debug("graph: {}, table: {}, codeFrom: {}, codeTo: {}, scanType: {}, query: {}"
+                    , graphName, table, codeFrom, codeTo, scanType, HgStoreClientUtil.toStr(query));
+        }
+
+        HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+        return this.toHgKvIteratorProxy(
+                this.toNodeTkvList(table, codeFrom, codeTo)
+                    .parallelStream()
+                    .map(
+                            e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                     .scanIterator(e.getTable()
+                                             , e.getKey().getKeyCode()
+                                             , e.getEndKey().getKeyCode(),
+                                                   scanType, query)
+                    )
+                    .collect(Collectors.toList())
+                , 0);
+
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(Builder scanReqBuilder) {
+        List<NodeTkv> nodeTKvs = this.toNodeTkvList(scanReqBuilder);
+        Function<NodeTkv, HgKvIterator<HgKvEntry>> hgKvIteratorFunction = e -> {
+            HgStoreSession session = this.getStoreNode(e.getNodeId())
+                                         .openSession(this.graphName);
+            return session.scanIterator(scanReqBuilder);
+        };
+        List<HgKvIterator> iterators = nodeTKvs.parallelStream()
+                                               .map(hgKvIteratorFunction)
+                                               .collect(Collectors.toList());
+        return this.toHgKvIteratorProxy(iterators, scanReqBuilder.getLimit());
+    }
+
+    @Override
+    public long count(String table) {
+        return this.toNodeTkvList(table)
+                   .parallelStream()
+                   .map(
+                           e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+                                    .count(e.getTable())
+                   )
+                   .collect(Collectors.summingLong(l -> l));
+    }
+
+    @Override
+    public List<HgKvIterator<HgKvEntry>> scanBatch(HgScanQuery scanQuery) {
+        HgAssert.isArgumentNotNull(scanQuery, "scanQuery");
+
+        return this.toTkvMapFunc(scanQuery.getScanMethod())
+                   .apply(scanQuery)
+                   .entrySet()
+                   .stream()
+                   .map(e ->
+                                this.getStoreNode(e.getKey())
+                                    .openSession(this.graphName)
+                                    .scanBatch(toScanQueryFunc(scanQuery.getScanMethod())
+                                                       .apply(scanQuery.getTable(), e.getValue())
+                                                       .setQuery(scanQuery.getQuery())
+                                                       .setLimit(scanQuery.getLimit())
+                                                       .setPerKeyLimit(scanQuery.getPerKeyLimit())
+                                                       .setPerKeyMax((scanQuery.getPerKeyMax()))
+                                                       .setScanType(scanQuery.getScanType())
+                                                       .build()
+                                    )
+                   )
+                   //.peek(e->log.info("{}",e))
+                   .flatMap(List::stream)
+                   .collect(Collectors.toList());
+
+    }
+
+    @Override
+    public KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch2(HgScanQuery scanQuery) {
+        return scanBatch3(scanQuery, null);
+    }
+
+    @Override
+    public KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch3(HgScanQuery scanQuery,
+                                                                   KvCloseableIterator iterator) {
+        KvCloseableIterator notifierWrap = KvBatchScanner.ofMerger(scanQuery, (query, notifier) -> {
+            Map<Long, List<NodeTkv>> nodeTkvs = this.toTkvMapFunc(scanQuery.getScanMethod())
+                                                    .apply(query);
+
+            nodeTkvs.forEach((storeId, tkvs) -> {
+                this.getStoreNode(storeId)
+                    .openSession(this.graphName)
+                    .scanBatch3(toScanQueryFunc(scanQuery.getScanMethod())
+                                        .apply(scanQuery.getTable(), tkvs)
+                                        .setQuery(scanQuery.getQuery())
+                                        .setLimit(scanQuery.getLimit())
+                                        .setSkipDegree(scanQuery.getSkipDegree())
+                                        .setPerKeyLimit(scanQuery.getPerKeyLimit())
+                                        .setPerKeyMax((scanQuery.getPerKeyMax()))
+                                        .setScanType(scanQuery.getScanType())
+                                        .setOrderType(scanQuery.getOrderType())
+                                        .build(), notifier
+                    );
+            });
+            return true;
+        });
+        return notifierWrap;
+    }
+
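+    /**
+     * Build a function that resolves a scan query into NodeTkv entries grouped by node id:
+     * RANGE pairs the start/end key lists, PREFIX expands the prefix list, and any other scan
+     * method falls back to a whole-table resolution.
+     */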
+    private Function<HgScanQuery, Map<Long, List<NodeTkv>>> toTkvMapFunc(
+            HgScanQuery.ScanMethod scanMethod) {
+        switch (scanMethod) {
+            case RANGE:
+                return scanQuery -> {
+                    List<HgOwnerKey> starts = scanQuery.getStartList();
+                    List<HgOwnerKey> ends = scanQuery.getEndList();
+                    int size = starts.size();
+                    return IntStream.range(0, size)
+                                    .boxed()
+                                    .map(i -> this.toNodeTkvList(scanQuery.getTable(),
+                                                                 starts.get(i), ends.get(i)))
+                                    .flatMap(List::stream)
+                                    .collect(groupingBy(NodeTkv::getNodeId));
+                };
+            case PREFIX:
+                return scanQuery ->
+                        scanQuery.getPrefixList()
+                                 .stream()
+                                 .map(keyPrefix -> this.toNodeTkvList(scanQuery.getTable(),
+                                                                      keyPrefix))
+                                 .flatMap(List::stream)
+                                 .collect(groupingBy(NodeTkv::getNodeId));
+
+            default:
+                return scanQuery -> this.toNodeTkvList(scanQuery.getTable())
+                                        .stream()
+                                        .collect(groupingBy(NodeTkv::getNodeId));
+        }
+    }
+
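+    /**
+     * Build a function that turns one node's NodeTkv batch back into a ScanBuilder of the
+     * matching kind: rangeOf for RANGE, prefixOf for PREFIX, tableOf otherwise.
+     */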
+    private BiFunction<String, List<NodeTkv>, HgScanQuery.ScanBuilder> toScanQueryFunc(
+            HgScanQuery.ScanMethod scanMethod) {
+        switch (scanMethod) {
+            case RANGE:
+                return (table, tkvList) -> {
+                    List<HgOwnerKey> startList = new LinkedList<>();
+                    List<HgOwnerKey> endList = new LinkedList<>();
+
+                    tkvList.forEach(e -> {
+                        startList.add(e.getKey());
+                        endList.add(e.getEndKey());
+                    });
+
+                    return HgScanQuery.ScanBuilder.rangeOf(table, startList, endList);
+                };
+            case PREFIX:
+                return (table, tkvList) ->
+                        HgScanQuery.ScanBuilder.prefixOf(table,
+                                                         tkvList.stream()
+                                                                .map(e -> e.getKey())
+                                                                .collect(Collectors.toList())
+                        );
+            default:
+                return (table, tkvList) -> HgScanQuery.ScanBuilder.tableOf(table);
+        }
+
+    }
+
+    /*-- common --*/
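+    /**
+     * Merge the per-node iterators: when every iterator is ordered, consume them in sorted
+     * sequence via SequencedIterator; otherwise fall back to TopWorkIteratorProxy, which
+     * drains them round-robin.
+     */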
+    private HgKvIterator toHgKvIteratorProxy(List<HgKvIterator> iteratorList, long limit) {
+        boolean isAllOrderedLimiter = iteratorList.stream()
+                                                  .allMatch(
+                                                          e -> e instanceof HgKvOrderedIterator);
+
+        HgKvIterator<HgKvEntry> iterator;
+        if (isAllOrderedLimiter) {
+            iterator = new SequencedIterator(iteratorList.stream()
+                                                         .map(e -> (HgKvOrderedIterator) e)
+                                                         .collect(Collectors.toList()), limit);
+        } else {
+            iterator = new TopWorkIteratorProxy(iteratorList, limit);
+        }
+
+        return iterator;
+    }
+
+    HgStoreNode getStoreNode(Long nodeId) {
+        HgStoreNode res = this.nodeManager.applyNode(this.graphName, nodeId);
+
+        if (res == null) {
+            throw err("Failed to apply for an instance of HgStoreNode from node-manager.");
+        }
+
+        return res;
+    }
+
+    public boolean doAction(String table, HgOwnerKey startKey, HgOwnerKey endKey,
+                            Function<NodeTkv, Boolean> action) {
+        Collection<HgNodePartition> partitions =
+                doPartition(table, startKey.getOwner(), endKey.getOwner());
+        for (HgNodePartition partition : partitions) {
+            HgStoreNode storeNode = this.getStoreNode(partition.getNodeId());
+            HgStoreSession session = this.txExecutor.openNodeSession(storeNode);
+            NodeTkv data = new NodeTkv(partition, table, startKey, endKey);
+            data.setSession(session);
+            if (!action.apply(data)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    public boolean doAction(String table, HgOwnerKey startKey, Integer code,
+                            Function<NodeTkv, Boolean> action) {
+        Collection<HgNodePartition> partitions = this.doPartition(table, code);
+        for (HgNodePartition partition : partitions) {
+            HgStoreNode storeNode = this.getStoreNode(partition.getNodeId());
+            HgStoreSession session = this.txExecutor.openNodeSession(storeNode);
+            NodeTkv data = new NodeTkv(partition, table, startKey, code);
+            data.setSession(session);
+            if (!action.apply(data)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    private List<NodeTkv> toNodeTkvList(Builder scanReqBuilder) {
+        // TODO: obtain the owner key from the scan request builder
+        String table = scanReqBuilder.getTable();
+        HgOwnerKey ownerKey = HgStoreClientConst.ALL_PARTITION_OWNER_KEY;
+        byte[] allOwner = ownerKey.getOwner();
+        Collection<HgNodePartition> partitions = doPartition(table,
+                                                             allOwner,
+                                                             allOwner);
+        List<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+        for (HgNodePartition partition : partitions) {
+            nodeTkvs.add(new NodeTkv(partition, table, ownerKey, ownerKey));
+        }
+        return nodeTkvs;
+    }
+
+    private List<NodeTkv> toNodeTkvList(String table) {
+        Collection<HgNodePartition> partitions = doPartition(table,
+                                                             HgStoreClientConst.ALL_PARTITION_OWNER_KEY.getOwner(),
+                                                             HgStoreClientConst.ALL_PARTITION_OWNER_KEY.getOwner());
+        ArrayList<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+        for (HgNodePartition partition : partitions) {
+            nodeTkvs.add(new NodeTkv(partition, table, HgStoreClientConst.ALL_PARTITION_OWNER_KEY,
+                                     HgStoreClientConst.ALL_PARTITION_OWNER_KEY));
+        }
+        return nodeTkvs;
+    }
+
+    private List<NodeTkv> toNodeTkvList(String table, HgOwnerKey ownerKey) {
+        Collection<HgNodePartition> partitions =
+                doPartition(table, ownerKey.getOwner(), ownerKey.getOwner());
+        ArrayList<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+        for (HgNodePartition partition : partitions) {
+            nodeTkvs.add(new NodeTkv(partition, table, ownerKey, ownerKey));
+        }
+
+        return nodeTkvs;
+    }
+
+    private List<NodeTkv> toNodeTkvList(String table, HgOwnerKey startKey, HgOwnerKey endKey) {
+        Collection<HgNodePartition> partitions =
+                doPartition(table, startKey.getOwner(), endKey.getOwner());
+        ArrayList<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+        for (HgNodePartition partition : partitions) {
+            nodeTkvs.add(new NodeTkv(partition, table, startKey, endKey));
+        }
+        return nodeTkvs;
+    }
+
+    private List<NodeTkv> toNodeTkvList(String table, int startCode, int endCode) {
+        Collection<HgNodePartition> partitions = this.doPartition(table, startCode, endCode);
+        ArrayList<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+        for (HgNodePartition partition : partitions) {
+            nodeTkvs.add(
+                    new NodeTkv(partition, table, HgOwnerKey.codeOf(startCode),
+                                HgOwnerKey.codeOf(endCode)));
+        }
+        return nodeTkvs;
+    }
+
+    /**
+     * @return not null
+     */
+    private Collection<HgNodePartition> doPartition(String table, byte[] startKey, byte[] endKey) {
+        HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet();
+
+        int status = this.nodePartitioner.partition(partitionerBuilder, this.graphName, startKey,
+                                                    endKey);
+
+        if (status != 0) {
+            throw err("The node-partitioner is not work.");
+        }
+
+        Collection<HgNodePartition> partitions = partitionerBuilder.getPartitions();
+
+        if (partitions.isEmpty()) {
+            throw err("Failed to get the collection of HgNodePartition from node-partitioner.");
+        }
+
+        return partitions;
+    }
+
+    /**
+     * @return not null
+     */
+    private Collection<HgNodePartition> doPartition(String table, int startCode, int endCode) {
+        HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet();
+        int status = this.nodePartitioner.partition(partitionerBuilder, this.graphName, startCode,
+                                                    endCode);
+
+        if (status != 0) {
+            throw err("The node-partitioner is not working.");
+        }
+
+        Collection<HgNodePartition> partitions = partitionerBuilder.getPartitions();
+
+        if (partitions.isEmpty()) {
+            throw err("Failed to get the collection of HgNodePartition from node-partitioner.");
+        }
+
+        return partitions;
+    }
+
+    Collection<HgNodePartition> doPartition(String table, int partitionId) {
+        HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet();
+        int status =
+                this.nodePartitioner.partition(partitionerBuilder, this.graphName, partitionId);
+
+        if (status != 0) {
+            throw err("The node-partitioner is not working.");
+        }
+
+        Collection<HgNodePartition> partitions = partitionerBuilder.getPartitions();
+
+        if (partitions.isEmpty()) {
+            throw err("Failed to get the collection of HgNodePartition from node-partitioner.");
+        }
+
+        return partitions;
+    }
+
+    private Stream<HgPair<HgStoreNode, NodeTkv>> getNodeStream(String table) {
+        return this.toNodeTkvList(table)
+                   .stream()
+                   .map(
+                           e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e)
+                   );
+    }
+
+    Stream<HgPair<HgStoreNode, NodeTkv>> getNodeStream(String table,
+                                                       HgOwnerKey ownerKey) {
+        return this.toNodeTkvList(table, ownerKey)
+                   .stream()
+                   .map(
+                           e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e)
+                   );
+    }
+
+    Stream<HgPair<HgStoreNode, NodeTkv>> getNodeStream(String table, HgOwnerKey startKey,
+                                                       HgOwnerKey endKey) {
+        return this.toNodeTkvList(table, startKey, endKey)
+                   .stream()
+                   .map(
+                           e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e)
+                   );
+
+    }
+
+    // private List<HgPair<HgStoreNode, NodeTkv>> getNode(String table) {
+    //    List<NodeTkv> nodeTkvList = this.toNodeTkvList(table);
+    //    return nodeTkv2Node(nodeTkvList);
+    // }
+
+    List<HgPair<HgStoreNode, NodeTkv>> getNode(String table, HgOwnerKey ownerKey) {
+        List<NodeTkv> nodeTkvList = this.toNodeTkvList(table, ownerKey);
+        return nodeTkv2Node(nodeTkvList);
+    }
+
+    List<HgPair<HgStoreNode, NodeTkv>> getNode(String table, HgOwnerKey startKey,
+                                               HgOwnerKey endKey) {
+        List<NodeTkv> nodeTkvList = this.toNodeTkvList(table, startKey, endKey);
+        return nodeTkv2Node(nodeTkvList);
+
+    }
+
+    private List<HgPair<HgStoreNode, NodeTkv>> nodeTkv2Node(Collection<NodeTkv> nodeTkvList) {
+        ArrayList<HgPair<HgStoreNode, NodeTkv>> hgPairs = new ArrayList<>(nodeTkvList.size());
+        for (NodeTkv nodeTkv : nodeTkvList) {
+            hgPairs.add(new HgPair<>(this.getStoreNode(nodeTkv.getNodeId()), nodeTkv));
+        }
+        return hgPairs;
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java
new file mode 100644
index 0000000..aca7bb7
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvOrderedIterator;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * An ordered proxy iterator: it consumes the sorted underlying iterators in sequence and
+ * switches to the next one only when the current one is exhausted.
+ * <p>
+ * created on 2022/03/10
+ *
+ * @version 0.1.0
+ */
+@Slf4j
+public class SequencedIterator implements HgKvIterator {
+
+    private static final byte[] EMPTY_BYTES = new byte[0];
+    private final Queue<HgKvOrderedIterator> queue;
+    private final long limit;
+    private HgKvOrderedIterator<HgKvEntry> iterator;
+    private HgKvEntry entry;
+    private int count;
+    private byte[] position = EMPTY_BYTES;
+    private byte[] position4Seeking = EMPTY_BYTES;
+
+    SequencedIterator(List<HgKvOrderedIterator> iterators, long limit) {
+        Collections.sort(iterators);
+        this.queue = new LinkedList<>(iterators);
+        this.limit = limit <= 0 ? Integer.MAX_VALUE : limit;
+    }
+
+    private HgKvOrderedIterator getIterator() {
+        if (this.queue.isEmpty()) {
+            return null;
+        }
+        HgKvOrderedIterator buf;
+        while ((buf = this.queue.poll()) != null) {
+            buf.seek(this.position4Seeking);
+            if (buf.hasNext()) {
+                break;
+            }
+            // Close exhausted iterators instead of silently dropping them from the queue.
+            buf.close();
+        }
+        return buf;
+    }
+
+    private void closeIterators() {
+        if (this.queue.isEmpty()) {
+            return;
+        }
+        HgKvOrderedIterator buf;
+        while ((buf = this.queue.poll()) != null) {
+            buf.close();
+        }
+
+    }
+
+    @Override
+    public byte[] key() {
+        if (this.entry != null) {
+            return this.entry.key();
+        }
+        return null;
+    }
+
+    @Override
+    public byte[] value() {
+        if (this.entry != null) {
+            return this.entry.value();
+        }
+        return null;
+    }
+
+    @Override
+    public byte[] position() {
+        return this.position;
+    }
+
+    @Override
+    public void seek(byte[] pos) {
+        if (pos != null) {
+            this.position4Seeking = pos;
+        }
+    }
+
+    @Override
+    public boolean hasNext() {
+        if (this.count >= this.limit) {
+            return false;
+        }
+        if (this.iterator == null) {
+            this.iterator = this.getIterator();
+        } else if (!this.iterator.hasNext()) {
+            this.iterator.close();
+            this.iterator = this.getIterator();
+        }
+        return this.iterator != null;
+    }
+
+    @Override
+    public Object next() {
+        if (this.iterator == null) {
+            hasNext();
+        }
+        if (this.iterator == null) {
+            throw new NoSuchElementException();
+        }
+        this.entry = this.iterator.next();
+        this.position = this.iterator.position();
+        if (!this.iterator.hasNext()) {
+            this.iterator.close();
+            this.iterator = null;
+        }
+        this.count++;
+        return this.entry;
+    }
+
+    @Override
+    public void close() {
+        if (this.iterator != null) {
+            this.iterator.close();
+        }
+        this.closeIterators();
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java
new file mode 100644
index 0000000..474b042
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvPagingIterator;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * A round-robin proxy over paging iterators: it consumes up to one page from the current
+ * iterator before shifting to the next iterator in the queue.
+ * <p>
+ * created on 2021/10/24
+ *
+ * @version 0.1.1
+ */
+@Slf4j
+public class ShiftWorkIteratorProxy implements HgKvIterator {
+
+    private static final byte[] EMPTY_BYTES = new byte[0];
+    private final int limit;
+    private HgKvPagingIterator<HgKvEntry> iterator;
+    private final Queue<HgKvPagingIterator> queue;
+    private HgKvEntry entry;
+    private int count;
+    private int shiftCount;
+
+    ShiftWorkIteratorProxy(List<HgKvPagingIterator> iterators, int limit) {
+        this.queue = new LinkedList<>(iterators);
+        this.limit = limit <= 0 ? Integer.MAX_VALUE : limit;
+    }
+
+    private HgKvPagingIterator getIterator() {
+        if (this.queue.isEmpty()) {
+            return null;
+        }
+
+        HgKvPagingIterator buf = null;
+
+        while ((buf = this.queue.poll()) != null) {
+            if (buf.hasNext()) {
+                break;
+            }
+            // Close exhausted iterators instead of silently dropping them from the queue.
+            buf.close();
+        }
+
+        if (buf == null) {
+            return null;
+        }
+
+        this.queue.add(buf);
+
+        return buf;
+    }
+
+    private void closeIterators() {
+        if (this.queue.isEmpty()) {
+            return;
+        }
+
+        HgKvPagingIterator buf;
+
+        while ((buf = this.queue.poll()) != null) {
+            buf.close();
+        }
+
+    }
+
+    private void setIterator() {
+
+        //   if (++this.shiftCount >= this.iterator.getPageSize() / 2) {
+        if (++this.shiftCount >= this.iterator.getPageSize()) {
+            this.iterator = null;
+            this.shiftCount = 0;
+        }
+
+    }
+
+    @Override
+    public byte[] key() {
+        if (this.entry != null) {
+            return this.entry.key();
+        }
+        return null;
+    }
+
+    @Override
+    public byte[] value() {
+        if (this.entry != null) {
+            return this.entry.value();
+        }
+        return null;
+    }
+
+    @Override
+    public byte[] position() {
+        return this.iterator != null ? this.iterator.position() : EMPTY_BYTES;
+    }
+
+    @Override
+    public void seek(byte[] position) {
+        if (this.iterator != null) {
+            this.iterator.seek(position);
+        }
+    }
+
+    @Override
+    public boolean hasNext() {
+        if (this.count >= this.limit) {
+            return false;
+        }
+        if (this.iterator == null
+            || !this.iterator.hasNext()) {
+            this.iterator = this.getIterator();
+        }
+        return this.iterator != null;
+    }
+
+    @Override
+    public Object next() {
+        if (this.iterator == null) {
+            hasNext();
+        }
+        if (this.iterator == null) {
+            throw new NoSuchElementException();
+        }
+        this.entry = this.iterator.next();
+        this.setIterator();
+        this.count++;
+        //log.info("next - > {}",this.entry);
+        return this.entry;
+    }
+
+    @Override
+    public void close() {
+        if (this.iterator != null) {
+            this.iterator.close();
+        }
+        this.closeIterators();
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java
new file mode 100644
index 0000000..21a37ae
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+
+/**
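+ * A proxy that drains several {@code HgKvIterator}s in round-robin order,
+ * returning at most {@code limit} entries in total.
+ *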
+ * created on 2021/10/21
+ *
+ * @version 0.1.0
+ */
+class TopWorkIteratorProxy implements HgKvIterator {
+
+    private static final byte[] EMPTY_BYTES = new byte[0];
+    private final Queue<HgKvIterator> queue;
+    private final long limit;
+    private HgKvIterator<HgKvEntry> iterator;
+    private HgKvEntry entry;
+    // result count
+    private int count;
+
+    TopWorkIteratorProxy(List<HgKvIterator> iterators, long limit) {
+        this.queue = new LinkedList<>(iterators);
+        this.limit = limit <= 0 ? Integer.MAX_VALUE : limit;
+    }
+
+    private HgKvIterator getIterator() {
+        if (this.queue.isEmpty()) {
+            return null;
+        }
+
+        HgKvIterator buf = null;
+
+        while ((buf = this.queue.poll()) != null) {
+            if (buf.hasNext()) {
+                break;
+            }
+        }
+
+        if (buf == null) {
+            return null;
+        }
+
+        this.queue.add(buf);
+
+        return buf;
+    }
+
+    private void closeIterators() {
+        if (this.queue.isEmpty()) {
+            return;
+        }
+
+        HgKvIterator buf;
+
+        while ((buf = this.queue.poll()) != null) {
+            buf.close();
+        }
+
+    }
+
+    private void setIterator() {
+        this.iterator = null;
+    }
+
+    @Override
+    public byte[] key() {
+        if (this.entry != null) {
+            return this.entry.key();
+        }
+        return null;
+    }
+
+    @Override
+    public byte[] value() {
+        if (this.entry != null) {
+            return this.entry.value();
+        }
+        return null;
+    }
+
+    @Override
+    public byte[] position() {
+        return this.iterator != null ? this.iterator.position() : EMPTY_BYTES;
+    }
+
+    @Override
+    public void seek(byte[] position) {
+        if (this.iterator != null) {
+            this.iterator.seek(position);
+        }
+    }
+
+    @Override
+    public boolean hasNext() {
+        if (this.count >= this.limit) {
+            return false;
+        }
+        if (this.iterator == null) {
+            this.iterator = this.getIterator();
+        }
+        return this.iterator != null;
+
+    }
+
+    @Override
+    public Object next() {
+        if (this.iterator == null) {
+            hasNext();
+        }
+        if (this.iterator == null) {
+            throw new NoSuchElementException();
+        }
+        this.entry = this.iterator.next();
+        this.setIterator();
+        this.count++;
+        return this.entry;
+    }
+
+    @Override
+    public void close() {
+        if (this.iterator != null) {
+            this.iterator.close();
+        }
+        this.closeIterators();
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java
new file mode 100644
index 0000000..20aa54b
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.IntStream;
+
+import org.apache.hugegraph.store.client.util.ExecutorPool;
+import org.apache.hugegraph.store.client.util.HgStoreClientConfig;
+import org.apache.hugegraph.store.term.HgPair;
+
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.stub.AbstractAsyncStub;
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+
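+/**
+ * Base class for store gRPC clients: keeps a fixed-size pool of channels per target address
+ * and hands out blocking/async stubs over that pool in a round-robin fashion.
+ */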
+public abstract class AbstractGrpcClient {
+
+    private static final Map<String, ManagedChannel[]> channels = new ConcurrentHashMap<>();
+    private static final int n = 5;
+    private static final int concurrency = 1 << n;
+    private static final AtomicLong counter = new AtomicLong(0);
+    private static final long limit = Long.MAX_VALUE >> 1;
+    private static final HgStoreClientConfig config = HgStoreClientConfig.of();
+    private Map<String, HgPair<ManagedChannel, AbstractBlockingStub>[]> blockingStubs =
+            new ConcurrentHashMap<>();
+    private Map<String, HgPair<ManagedChannel, AbstractAsyncStub>[]> asyncStubs =
+            new ConcurrentHashMap<>();
+    private ThreadPoolExecutor executor;
+
+    {
+        executor = ExecutorPool.createExecutor("common", 60, concurrency, concurrency);
+    }
+
+    public AbstractGrpcClient() {
+
+    }
+
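+    // Lazily builds a pool of 'concurrency' channels per target; double-checked locking
+    // ensures the pool is created only once.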
+    public ManagedChannel[] getChannels(String target) {
+        ManagedChannel[] tc;
+        if ((tc = channels.get(target)) == null) {
+            synchronized (channels) {
+                if ((tc = channels.get(target)) == null) {
+                    try {
+                        ManagedChannel[] value = new ManagedChannel[concurrency];
+                        CountDownLatch latch = new CountDownLatch(concurrency);
+                        for (int i = 0; i < concurrency; i++) {
+                            int fi = i;
+                            executor.execute(() -> {
+                                try {
+                                    value[fi] = getManagedChannel(target);
+                                } catch (Exception e) {
+                                    throw new RuntimeException(e);
+                                } finally {
+                                    latch.countDown();
+                                }
+                            });
+                        }
+                        latch.await();
+                        channels.put(target, tc = value);
+                    } catch (Exception e) {
+                        throw new RuntimeException(e);
+                    }
+                }
+            }
+        }
+        return tc;
+    }
+
+    public abstract AbstractBlockingStub getBlockingStub(ManagedChannel channel);
+
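+    // Round-robin stub selection: (l & (concurrency - 1)) maps the shared counter onto a
+    // pool index, which works because concurrency is a power of two.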
+    public AbstractBlockingStub getBlockingStub(String target) {
+        ManagedChannel[] channels = getChannels(target);
+        HgPair<ManagedChannel, AbstractBlockingStub>[] pairs = blockingStubs.get(target);
+        long l = counter.getAndIncrement();
+        if (l >= limit) {
+            counter.set(0);
+        }
+        int index = (int) (l & (concurrency - 1));
+        if (pairs == null) {
+            synchronized (blockingStubs) {
+                pairs = blockingStubs.get(target);
+                if (pairs == null) {
+                    HgPair<ManagedChannel, AbstractBlockingStub>[] value = new HgPair[concurrency];
+                    IntStream.range(0, concurrency).forEach(i -> {
+                        ManagedChannel channel = channels[i];
+                        AbstractBlockingStub stub = getBlockingStub(channel);
+                        value[i] = new HgPair<>(channel, stub);
+                        // log.info("create channel for {}",target);
+                    });
+                    blockingStubs.put(target, value);
+                    AbstractBlockingStub stub = value[index].getValue();
+                    return (AbstractBlockingStub) setBlockingStubOption(stub);
+                }
+            }
+        }
+        return (AbstractBlockingStub) setBlockingStubOption(pairs[index].getValue());
+    }
+
+    private AbstractStub setBlockingStubOption(AbstractBlockingStub stub) {
+        return stub.withDeadlineAfter(config.getGrpcTimeoutSeconds(), TimeUnit.SECONDS)
+                   .withMaxInboundMessageSize(
+                           config.getGrpcMaxInboundMessageSize())
+                   .withMaxOutboundMessageSize(
+                           config.getGrpcMaxOutboundMessageSize());
+    }
+
+    public AbstractAsyncStub getAsyncStub(ManagedChannel channel) {
+        return null;
+    }
+
+    public AbstractAsyncStub getAsyncStub(String target) {
+        ManagedChannel[] channels = getChannels(target);
+        HgPair<ManagedChannel, AbstractAsyncStub>[] pairs = asyncStubs.get(target);
+        long l = counter.getAndIncrement();
+        if (l >= limit) {
+            counter.set(0);
+        }
+        int index = (int) (l & (concurrency - 1));
+        if (pairs == null) {
+            synchronized (asyncStubs) {
+                pairs = asyncStubs.get(target);
+                if (pairs == null) {
+                    HgPair<ManagedChannel, AbstractAsyncStub>[] value = new HgPair[concurrency];
+                    IntStream.range(0, concurrency).parallel().forEach(i -> {
+                        ManagedChannel channel = channels[i];
+                        AbstractAsyncStub stub = getAsyncStub(channel);
+                        // stub.withMaxInboundMessageSize(config.getGrpcMaxInboundMessageSize())
+                        //    .withMaxOutboundMessageSize(config.getGrpcMaxOutboundMessageSize());
+                        value[i] = new HgPair<>(channel, stub);
+                        // log.info("create channel for {}",target);
+                    });
+                    asyncStubs.put(target, value);
+                    AbstractAsyncStub stub =
+                            (AbstractAsyncStub) setStubOption(value[index].getValue());
+                    return stub;
+                }
+            }
+        }
+        return (AbstractAsyncStub) setStubOption(pairs[index].getValue());
+
+    }
+
+    private AbstractStub setStubOption(AbstractStub value) {
+        return value.withMaxInboundMessageSize(
+                            config.getGrpcMaxInboundMessageSize())
+                    .withMaxOutboundMessageSize(
+                            config.getGrpcMaxOutboundMessageSize());
+    }
+
+    private ManagedChannel getManagedChannel(String target) {
+        return ManagedChannelBuilder.forTarget(target).usePlaintext().build();
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java
new file mode 100644
index 0000000..0cc4b30
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Arrays;
+
+import org.apache.hugegraph.store.HgKvEntry;
+
+class GrpcKvEntryImpl implements HgKvEntry {
+
+    private final byte[] key;
+    private final byte[] value;
+    private final int code;
+
+    GrpcKvEntryImpl(byte[] k, byte[] v, int code) {
+        this.key = k;
+        this.value = v;
+        this.code = code;
+    }
+
+    @Override
+    public int code() {
+        return code;
+    }
+
+    @Override
+    public byte[] key() {
+        return key;
+    }
+
+    @Override
+    public byte[] value() {
+        return value;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        GrpcKvEntryImpl hgKvEntry = (GrpcKvEntryImpl) o;
+        return Arrays.equals(key, hgKvEntry.key) && Arrays.equals(value, hgKvEntry.value);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = Arrays.hashCode(key);
+        result = 31 * result + Arrays.hashCode(value);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "HgKvEntryImpl{" +
+               "key=" + Arrays.toString(key) +
+               ", value=" + Arrays.toString(value) +
+               ", code=" + code +
+               '}';
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java
new file mode 100644
index 0000000..c9825a6
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.List;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvOrderedIterator;
+import org.apache.hugegraph.store.HgKvPagingIterator;
+import org.apache.hugegraph.store.HgPageSize;
+import org.apache.hugegraph.store.HgSeekAble;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+import org.apache.hugegraph.store.grpc.common.Kv;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
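+ * Adapts a gRPC {@code KvCloseableIterator<Kv>} into the {@code HgKvIterator} family,
+ * supporting paging ({@code HgKvPagingIterator}) and per-node ordering ({@code HgKvOrderedIterator}).
+ *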
+ * created on 2021/10/20
+ *
+ * @version 0.2.1
+ */
+@Slf4j
+class GrpcKvIteratorImpl implements HgKvPagingIterator<HgKvEntry>, HgKvOrderedIterator<HgKvEntry> {
+
+    private final byte[] emptyBytes = HgStoreClientConst.EMPTY_BYTES;
+    private final KvCloseableIterator<Kv> iterator;
+    private final HgPageSize pageLimiter;
+    private final HgStoreNodeSession session;
+    private HgKvEntry element;
+
+    private GrpcKvIteratorImpl(HgStoreNodeSession session, KvCloseableIterator<Kv> iterator,
+                               HgPageSize pageLimiter) {
+        this.iterator = iterator;
+        this.pageLimiter = pageLimiter;
+        this.session = session;
+    }
+
+    public static HgKvIterator<HgKvEntry> of(HgStoreNodeSession nodeSession,
+                                             KvCloseableIterator<Kv> iterator) {
+        if (iterator instanceof HgPageSize) {
+            return of(nodeSession, iterator, (HgPageSize) iterator);
+        }
+        return new GrpcKvIteratorImpl(nodeSession, iterator, () -> 1);
+    }
+
+    public static HgKvIterator<HgKvEntry> of(HgStoreNodeSession nodeSession,
+                                             KvCloseableIterator<Kv> iterator,
+                                             HgPageSize pageLimiter) {
+        return new GrpcKvIteratorImpl(nodeSession, iterator, pageLimiter);
+    }
+
+    public static HgKvIterator<HgKvEntry> of(HgStoreNodeSession nodeSession, List<Kv> kvList) {
+        int pageSize = kvList.size();
+        return new GrpcKvIteratorImpl(nodeSession, new KvListIterator<Kv>(kvList), () -> pageSize);
+    }
+
+    @Override
+    public boolean hasNext() {
+        // if (log.isDebugEnabled()) {
+        //    if (!this.iterator.hasNext() && !nodeSession.getGraphName().endsWith("/s")) {
+        //        log.debug("[ANALYSIS GrpcKv hasNext-> FALSE] ");
+        //    }
+        // }
+        return this.iterator.hasNext();
+    }
+
+    @Override
+    public HgKvEntry next() {
+        Kv kv = this.iterator.next();
+        this.element = new GrpcKvEntryImpl(kv.getKey().toByteArray(), kv.getValue().toByteArray(),
+                                           kv.getCode());
+        return this.element;
+    }
+
+    @Override
+    public byte[] key() {
+        if (this.element == null) {
+            return null;
+        }
+        return this.element.key();
+    }
+
+    @Override
+    public byte[] value() {
+        if (this.element == null) {
+            return null;
+        }
+        return this.element.value();
+    }
+
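+    // The position is composed of the upstream iterator's position, the 4-byte entry code,
+    // and the current key, concatenated in that order.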
+    @Override
+    public byte[] position() {
+        if (this.element == null) {
+            return emptyBytes;
+        }
+        byte[] key = this.element.key();
+        if (key == null) {
+            return emptyBytes;
+        }
+        if (!(this.iterator instanceof HgSeekAble)) {
+            return emptyBytes;
+        }
+        byte[] upstream = ((HgSeekAble) this.iterator).position();
+        byte[] code = HgStoreClientUtil.toIntBytes(this.element.code());
+        byte[] result = new byte[upstream.length + Integer.BYTES + key.length];
+        System.arraycopy(upstream, 0, result, 0, upstream.length);
+        System.arraycopy(code, 0, result, upstream.length, Integer.BYTES);
+        System.arraycopy(key, 0, result, upstream.length + Integer.BYTES, key.length);
+        return result;
+    }
+
+    @Override
+    public void seek(byte[] position) {
+        if (this.iterator instanceof HgSeekAble) {
+            ((HgSeekAble) this.iterator).seek(position);
+        }
+    }
+
+    @Override
+    public long getPageSize() {
+        return pageLimiter.getPageSize();
+    }
+
+    @Override
+    public boolean isPageEmpty() {
+        return !iterator.hasNext();
+    }
+
+    @Override
+    public int compareTo(HgKvOrderedIterator o) {
+        return Long.compare(this.getSequence(), o.getSequence());
+    }
+
+    @Override
+    public long getSequence() {
+        return this.session.getStoreNode().getNodeId().longValue();
+    }
+
+    @Override
+    public void close() {
+        this.iterator.close();
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java
new file mode 100644
index 0000000..5f66470
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.grpc.HealthyGrpc;
+import org.apache.hugegraph.store.grpc.HealthyOuterClass;
+
+import com.google.protobuf.Empty;
+
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+
+/**
+ * gRPC client for checking whether a store node is healthy.
+ */
+@ThreadSafe
+public final class GrpcNodeHealthyClient {
+
+    private final static Map<String, ManagedChannel> CHANNEL_MAP = new ConcurrentHashMap<>();
+    private final static Map<String, HealthyGrpc.HealthyBlockingStub> STUB_MAP =
+            new ConcurrentHashMap<>();
+
+    // TODO: Forbid construction from outside the package.
+    public GrpcNodeHealthyClient() {
+
+    }
+
+    private ManagedChannel getChannel(String target) {
+        // computeIfAbsent keeps channel creation atomic per target, matching the @ThreadSafe contract.
+        return CHANNEL_MAP.computeIfAbsent(
+                target, t -> ManagedChannelBuilder.forTarget(t).usePlaintext().build());
+    }
+
+    private HealthyGrpc.HealthyBlockingStub getStub(String target) {
+        return STUB_MAP.computeIfAbsent(target, t -> HealthyGrpc.newBlockingStub(getChannel(t)));
+    }
+
+
+/*    boolean isHealthy(GrpcStoreNodeImpl node) {
+        String target = node.getAddress();
+
+        HealthyOuterClass.StringReply response = getStub(target).isOk(Empty.newBuilder().build());
+        String res = response.getMessage();
+
+        if ("ok".equals(res)) {
+            return true;
+        } else {
+            System.out.printf("gRPC-res-msg: %s%n", res);
+            return false;
+        }
+    }*/
+
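+    // NOTE: the health-check target is hardcoded to localhost:9080 and a fresh channel is
+    // created on every call; the cached getStub(target) path above is not used here.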
+    public boolean isHealthy() {
+        String target = "localhost:9080";
+        ManagedChannel channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build();
+        HealthyGrpc.HealthyBlockingStub stub = HealthyGrpc.newBlockingStub(channel);
+        HealthyOuterClass.StringReply response = stub.isOk(Empty.newBuilder().build());
+
+        String res = response.getMessage();
+        System.out.printf("gRPC response message:%s%n", res);
+
+        return "ok".equals(res);
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java
new file mode 100644
index 0000000..eb215a4
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hugegraph.store.client.HgPrivate;
+import org.apache.hugegraph.store.client.HgStoreNode;
+import org.apache.hugegraph.store.client.HgStoreNodeBuilder;
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+import org.apache.hugegraph.store.client.util.HgAssert;
+
+/**
+ * created on 2021/10/12
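+ *
+ * <p>Minimal usage sketch (how the builder instance is obtained depends on
+ * {@code HgStoreNodeManager}; the node id and address below are only illustrative):
+ * <pre>{@code
+ * HgStoreNode node = builder.setNodeId(1L)
+ *                           .setAddress("127.0.0.1:8500")
+ *                           .build();
+ * }</pre>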
+ */
+public class GrpcStoreNodeBuilder implements HgStoreNodeBuilder {
+
+    private static final GrpcStoreSessionClient sessionClient = new GrpcStoreSessionClient();
+    private static final GrpcStoreStreamClient streamClient = new GrpcStoreStreamClient();
+    private static final AtomicLong ids = new AtomicLong(0);
+    private final HgStoreNodeManager nodeManager;
+    private Long nodeId;
+    private String address;
+
+    public GrpcStoreNodeBuilder(HgStoreNodeManager nodeManager, HgPrivate hgPrivate) {
+        HgAssert.isArgumentNotNull(hgPrivate, "hgPrivate");
+        HgAssert.isArgumentNotNull(nodeManager, "nodeManager");
+        this.nodeManager = nodeManager;
+    }
+
+    @Override
+    public GrpcStoreNodeBuilder setAddress(String address) {
+        HgAssert.isFalse(HgAssert.isInvalid(address), "The argument is invalid: address.");
+        this.address = address;
+        return this;
+    }
+
+    @Override
+    public GrpcStoreNodeBuilder setNodeId(Long nodeId) {
+        HgAssert.isFalse(nodeId == null, "The argument is invalid: nodeId.");
+        this.nodeId = nodeId;
+        return this;
+    }
+
+    @Override
+    public HgStoreNode build() {
+        // TODO: delete
+        if (this.nodeId == null) {
+            this.nodeId = ids.addAndGet(-1L);
+        }
+
+        HgAssert.isFalse(this.nodeId == null, "nodeId can't be null");
+        HgAssert.isFalse(this.address == null, "address can't be null");
+
+        GrpcStoreNodeImpl node =
+                new GrpcStoreNodeImpl(this.nodeManager, sessionClient, streamClient);
+        node.setNodeId(this.nodeId);
+        node.setAddress(this.address);
+
+        return node;
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java
new file mode 100644
index 0000000..4ca468b
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Objects;
+
+import org.apache.hugegraph.store.HgStoreSession;
+import org.apache.hugegraph.store.client.HgStoreNode;
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+
+/**
+ * created on 2021/10/11
+ */
+class GrpcStoreNodeImpl implements HgStoreNode {
+
+    private final GrpcStoreSessionClient sessionClient;
+    private final GrpcStoreStreamClient streamClient;
+    private final HgStoreNodeManager nodeManager;
+    private String address;
+    private Long nodeId;
+
+    GrpcStoreNodeImpl(HgStoreNodeManager nodeManager, GrpcStoreSessionClient sessionClient,
+                      GrpcStoreStreamClient streamClient) {
+        this.nodeManager = nodeManager;
+        this.sessionClient = sessionClient;
+        this.streamClient = streamClient;
+    }
+
+    @Override
+    public Long getNodeId() {
+        return this.nodeId;
+    }
+
+    GrpcStoreNodeImpl setNodeId(Long nodeId) {
+        this.nodeId = nodeId;
+        return this;
+    }
+
+    @Override
+    public String getAddress() {
+        return this.address;
+    }
+
+    GrpcStoreNodeImpl setAddress(String address) {
+        this.address = address;
+        return this;
+    }
+
+    @Override
+    public HgStoreSession openSession(String graphName) {
+        // HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument: graphName is invalid.");
+        // return new GrpcStoreNodeSessionImpl2(this, graphName,this.nodeManager, this
+        // .sessionClient, this
+        // .streamClient);
+        return new GrpcStoreNodeSessionImpl(this, graphName, this.nodeManager, this.sessionClient,
+                                            this.streamClient);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        GrpcStoreNodeImpl that = (GrpcStoreNodeImpl) o;
+        return Objects.equals(address, that.address) && Objects.equals(nodeId, that.nodeId);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(address, nodeId);
+    }
+
+    @Override
+    public String toString() {
+        return "storeNode: {" +
+               "address: \"" + address + "\"" +
+               ", nodeId: " + nodeId +
+               "}";
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java
new file mode 100644
index 0000000..77c8a45
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java
@@ -0,0 +1,545 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvStore;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.client.HgStoreNode;
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.client.util.HgStoreClientConfig;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+import org.apache.hugegraph.store.client.util.HgUuid;
+import org.apache.hugegraph.store.grpc.common.GraphMethod;
+import org.apache.hugegraph.store.grpc.common.Key;
+import org.apache.hugegraph.store.grpc.common.OpType;
+import org.apache.hugegraph.store.grpc.common.TableMethod;
+import org.apache.hugegraph.store.grpc.session.BatchEntry;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamStub;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.UnsafeByteOperations;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
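+ * A session bound to one store node and one graph; translates store session operations
+ * into gRPC session and stream calls against that node.
+ *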
+ * created on 2022/01/19
+ *
+ * @version 0.6.0 added batch get on 2022/04/06
+ */
+@Slf4j
+@NotThreadSafe
+class GrpcStoreNodeSessionImpl implements HgStoreNodeSession {
+
+    private static final HgStoreClientConfig hgStoreClientConfig = HgStoreClientConfig.of();
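+    // Numeric ids for the built-in table names; used when filling BatchEntry.setTable().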
+    private static final ConcurrentHashMap<String, Integer> tables = new ConcurrentHashMap<>() {{
+        put("unknown", 0);
+        put("g+v", 1);
+        put("g+oe", 2);
+        put("g+ie", 3);
+        put("g+index", 4);
+        put("g+task", 5);
+        put("g+olap", 6);
+        put("g+server", 7);
+    }};
+    private final HgStoreNode storeNode;
+    private final String graphName;
+    private final GrpcStoreSessionClient storeSessionClient;
+    private final GrpcStoreStreamClient storeStreamClient;
+    private final HgStoreNodeManager nodeManager;
+    private final NotifyingExecutor notifier;
+    private final SwitchingExecutor switcher;
+    private final BatchEntry.Builder batchEntryBuilder = BatchEntry.newBuilder();
+    private final Key.Builder builder = Key.newBuilder();
+    private boolean isAutoCommit = true;
+    private String batchId;
+    private LinkedList<BatchEntry> batchEntries = new LinkedList<>();
+
+    GrpcStoreNodeSessionImpl(HgStoreNode storeNode, String graphName,
+                             HgStoreNodeManager nodeManager,
+                             GrpcStoreSessionClient sessionClient,
+                             GrpcStoreStreamClient streamClient) {
+        HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument: graphName is invalid.");
+        HgAssert.isFalse(nodeManager == null, "the argument: nodeManager is null.");
+        HgAssert.isFalse(storeNode == null, "the argument: storeNode is null.");
+        HgAssert.isFalse(sessionClient == null, "the argument: sessionClient is null.");
+        HgAssert.isFalse(streamClient == null, "the argument: streamClient is null.");
+
+        this.graphName = graphName;
+        this.storeNode = storeNode;
+        this.storeSessionClient = sessionClient;
+        this.storeStreamClient = streamClient;
+        this.nodeManager = nodeManager;
+
+        this.notifier = new NotifyingExecutor(this.graphName, this.nodeManager, this);
+        this.switcher = SwitchingExecutor.of();
+    }
+
+    @Override
+    public String getGraphName() {
+        return graphName;
+    }
+
+    @Override
+    public HgStoreNode getStoreNode() {
+        return storeNode;
+    }
+
+    public Key toKey(HgOwnerKey ownerKey) {
+        if (ownerKey == null) {
+            return null;
+        }
+        return builder
+                .setKey(UnsafeByteOperations.unsafeWrap(ownerKey.getKey()))
+                .setCode(ownerKey.getKeyCode())
+                .build();
+    }
+
+    @Override
+    public void beginTx() {
+        this.isAutoCommit = false;
+    }
+
+    @Override
+    public void commit() {
+        try {
+            if (this.isAutoCommit) {
+                throw new IllegalStateException("It's not in tx state");
+            }
+            if (this.batchEntries.isEmpty()) {
+                this.resetTx();
+                return;
+            }
+            if (!this.doCommit(this.batchEntries)) {
+                throw new Exception("Failed to invoke doCommit");
+            }
+        } catch (Throwable t) {
+            throw new RuntimeException(t);
+        } finally {
+            this.resetTx();
+        }
+
+    }
+
+    @Override
+    public void rollback() {
+        if (this.isAutoCommit) {
+            throw new IllegalStateException("It's not in tx state");
+        }
+        this.resetTx();
+    }
+
+    @Override
+    public boolean isTx() {
+        return !this.isAutoCommit;
+    }
+
+    private void resetTx() {
+        this.isAutoCommit = true;
+        this.batchId = null;
+        this.batchEntries = new LinkedList<>();
+    }
+
+    // TODO: distributed tx is not supported yet.
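+    // In auto-commit mode every operation gets a fresh batch id; within a tx the same id
+    // is reused until resetTx() is called.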
+    private String getBatchId() {
+        if (this.isAutoCommit) {
+            this.batchId = HgUuid.newUUID();
+        } else {
+            if (this.batchId == null) {
+                this.batchId = HgUuid.newUUID();
+            }
+        }
+        return this.batchId;
+    }
+
+    @Override
+    public boolean put(String table, HgOwnerKey ownerKey, byte[] value) {
+        return this.prepareBatchEntry(OpType.OP_TYPE_PUT, table, ownerKey, null, value);
+    }
+
+    @Override
+    public boolean directPut(String table, int partitionId, HgOwnerKey key, byte[] value) {
+        return false;
+    }
+
+    @Override
+    public boolean delete(String table, HgOwnerKey ownerKey) {
+        return this.prepareBatchEntry(OpType.OP_TYPE_DEL, table, ownerKey, null, null);
+    }
+
+    @Override
+    public boolean deleteSingle(String table, HgOwnerKey ownerKey) {
+        return this.prepareBatchEntry(OpType.OP_TYPE_DEL_SINGLE, table, ownerKey, null, null);
+    }
+
+    @Override
+    public boolean deletePrefix(String table, HgOwnerKey prefix) {
+        return this.prepareBatchEntry(OpType.OP_TYPE_DEL_PREFIX, table, prefix, null, null);
+    }
+
+    @Override
+    public boolean deleteRange(String table, HgOwnerKey start, HgOwnerKey end) {
+        return this.prepareBatchEntry(OpType.OP_TYPE_DEL_RANGE, table, start, end, null);
+    }
+
+    @Override
+    public boolean merge(String table, HgOwnerKey key, byte[] value) {
+        return this.prepareBatchEntry(OpType.OP_TYPE_MERGE, table, key, null, value);
+    }
+
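+    // Builds one BatchEntry for the given op type; in auto-commit mode it is committed
+    // immediately, otherwise it is buffered until commit().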
+    private boolean prepareBatchEntry(OpType opType, String table, HgOwnerKey startKey,
+                                      HgOwnerKey endKey, byte[] value) {
+        this.batchEntryBuilder.clear().setOpType(opType);
+        this.batchEntryBuilder.setTable(tables.get(table));
+        if (startKey != null) {
+            this.batchEntryBuilder.setStartKey(toKey(startKey));
+        }
+        if (endKey != null) {
+            this.batchEntryBuilder.setEndKey(toKey(endKey));
+        }
+        if (value != null) {
+            this.batchEntryBuilder.setValue(ByteString.copyFrom(value));
+        }
+        if (this.isAutoCommit) {
+            return this.doCommit(Collections.singletonList(this.batchEntryBuilder.build()));
+        } else {
+            return this.batchEntries.add(this.batchEntryBuilder.build());
+        }
+
+    }
+
+    private boolean doCommit(List<BatchEntry> entries) {
+        return this.notifier.invoke(
+                () -> this.storeSessionClient.doBatch(this, this.getBatchId(), entries),
+                e -> true
+        ).orElse(false);
+    }
+
+    @Override
+    public byte[] get(String table, HgOwnerKey ownerKey) {
+        return this.notifier.invoke(
+                () -> this.storeSessionClient.doGet(this, table, ownerKey),
+                e -> e.getValueResponse().getValue().toByteArray()
+        ).orElse(HgStoreClientConst.EMPTY_BYTES);
+    }
+
+    @Override
+    public boolean clean(int partId) {
+        return this.notifier.invoke(
+                () -> this.storeSessionClient.doClean(this, partId),
+                e -> true
+        ).orElse(false);
+    }
+
+    @Override
+    public List<HgKvEntry> batchGetOwner(String table, List<HgOwnerKey> keyList) {
+        return this.notifier.invoke(
+                           () -> this.storeSessionClient.doBatchGet(this, table, keyList),
+                           e -> e.getKeyValueResponse().getKvList()
+                                 .stream()
+                                 .map(kv -> (HgKvEntry) new GrpcKvEntryImpl(kv.getKey().toByteArray()
+                                         , kv.getValue().toByteArray(), kv.getCode())
+                                 )
+                                 .collect(Collectors.toList()))
+                            .orElse((List<HgKvEntry>) HgStoreClientConst.EMPTY_LIST);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> batchPrefix(String table, List<HgOwnerKey> keyList) {
+        return GrpcKvIteratorImpl.of(this,
+                                     this.storeStreamClient.doBatchScanOneShot(this,
+                                                                               HgScanQuery.prefixOf(
+                                                                                       table,
+                                                                                       keyList))
+        );
+    }
+
+    @Override
+    public boolean existsTable(String table) {
+        return this.notifier.invoke(
+                           () -> this.storeSessionClient.doTable(this, table,
+                                                                 TableMethod.TABLE_METHOD_EXISTS),
+                           e -> true)
+                            .orElse(false);
+    }
+
+    @Override
+    public boolean createTable(String table) {
+        return this.notifier.invoke(
+                           () -> this.storeSessionClient.doTable(this, table,
+                                                                 TableMethod.TABLE_METHOD_CREATE),
+                           e -> true)
+                            .orElse(false);
+    }
+
+    @Override
+    public boolean deleteTable(String table) {
+        return this.notifier.invoke(
+                           () -> this.storeSessionClient.doTable(this, table,
+                                                                 TableMethod.TABLE_METHOD_DELETE),
+                           e -> true)
+                            .orElse(false);
+    }
+
+    @Override
+    public boolean dropTable(String table) {
+        return this.notifier.invoke(
+                           () -> this.storeSessionClient.doTable(this, table,
+                                                                 TableMethod.TABLE_METHOD_DROP),
+                           e -> true)
+                            .orElse(false);
+    }
+
+    @Override
+    public boolean deleteGraph(String graph) {
+        return this.notifier.invoke(
+                           () -> this.storeSessionClient.doGraph(this, graph,
+                                                                 GraphMethod.GRAPH_METHOD_DELETE),
+                           e -> true)
+                            .orElse(false);
+    }
+
+    @Override
+    public boolean truncate() {
+        return this.notifier.invoke(
+                           () -> this.storeSessionClient.doTable(this,
+                                                                 HgStoreClientConst.EMPTY_TABLE
+                                   , TableMethod.TABLE_METHOD_TRUNCATE),
+                           e -> true)
+                            .orElse(false);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table) {
+        return GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan(this, table, 0));
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, long limit) {
+        return this.switcher.invoke(getSwitcherSupplier(limit),
+                () -> GrpcKvIteratorImpl.of(this,
+                        this.storeStreamClient.doScan(this, table, limit)),
+                () -> GrpcKvIteratorImpl.of(this,
+                        this.storeStreamClient.doScanOneShot(this, table, limit))
+        ).get();
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(ScanStreamReq.Builder builder) {
+        HgStoreStreamStub stub = getStub();
+        KvPageScanner scanner = new KvPageScanner(this,
+                                                  stub,
+                                                  builder);
+        return GrpcKvIteratorImpl.of(this, scanner);
+    }
+
+    @Override
+    public long count(String table) {
+        return this.storeSessionClient.count(this, table);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, byte[] query) {
+        return GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan(this, table, 0, query));
+    }
+
+    private HgStoreStreamStub getStub() {
+        return this.storeStreamClient.getStub(this);
+    }
+
+    // @Override
+    // public HgKvIterator<HgKvEntry> scanIterator(ScanStreamReq scanReq) {
+    //    KvPageScanner6 scanner = new KvPageScanner6(this,
+    //                                                getStub(),
+    //                                                scanReq.toBuilder());
+    //    return GrpcKvIteratorImpl.of(this, scanner);
+    // }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, long limit, byte[] query) {
+        return this.switcher.invoke(getSwitcherSupplier(limit),
+                () -> GrpcKvIteratorImpl.of(this,
+                        this.storeStreamClient.doScan(this, table, limit, query)),
+                () -> GrpcKvIteratorImpl.of(this,
+                        this.storeStreamClient.doScanOneShot(this, table, limit, query))
+        ).get();
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix) {
+        return GrpcKvIteratorImpl.of(this,
+                                     this.storeStreamClient.doScan(this, table, keyPrefix, 0));
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix, long limit) {
+        return this.switcher.invoke(getSwitcherSupplier(limit),
+                                    () -> GrpcKvIteratorImpl.of(this,
+                                                                this.storeStreamClient.doScan(this,
+                                                                                              table,
+                                                                                              keyPrefix,
+                                                                                              limit)),
+                                    () -> GrpcKvIteratorImpl.of(this,
+                                                                this.storeStreamClient.doScanOneShot(
+                                                                        this,
+                                                                        table,
+                                                                        keyPrefix,
+                                                                        limit)))
+                            .get();
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey keyPrefix, long limit,
+                                                byte[] query) {
+        return this.switcher.invoke(getSwitcherSupplier(limit),
+                                    () -> GrpcKvIteratorImpl.of(this,
+                                                                this.storeStreamClient.doScan(
+                                                                        this,
+                                                                        table,
+                                                                        keyPrefix,
+                                                                        limit,
+                                                                        query)),
+                                    () -> GrpcKvIteratorImpl.of(this,
+                                                                this.storeStreamClient.doScanOneShot(
+                                                                        this,
+                                                                        table,
+                                                                        keyPrefix,
+                                                                        limit,
+                                                                        query)))
+                            .get();
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey,
+                                                HgOwnerKey endKey) {
+        return scanIterator(table, startKey, endKey, 0, HgKvStore.SCAN_ANY, null);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey,
+                                                HgOwnerKey endKey, long limit) {
+        return scanIterator(table, startKey, endKey, limit,
+                            HgStoreClientUtil.isValid(endKey) ? HgStoreClientConst.SCAN_TYPE_RANGE :
+                            HgStoreClientConst.SCAN_TYPE_ANY, null);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey,
+                                                HgOwnerKey endKey, long limit, byte[] query) {
+        return scanIterator(table, startKey, endKey, limit,
+                            HgStoreClientUtil.isValid(endKey) ? HgStoreClientConst.SCAN_TYPE_RANGE :
+                            HgStoreClientConst.SCAN_TYPE_ANY, query);
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, HgOwnerKey startKey,
+                                                HgOwnerKey endKey,
+                                                long limit, int scanType, byte[] query) {
+
+        return this.switcher.invoke(getSwitcherSupplier(limit),
+                                    () -> GrpcKvIteratorImpl.of(this,
+                                                                this.storeStreamClient.doScan(
+                                                                        this,
+                                                                        table,
+                                                                        startKey,
+                                                                        endKey,
+                                                                        limit,
+                                                                        scanType,
+                                                                        query)),
+                                    () -> GrpcKvIteratorImpl.of(this,
+                                                                this.storeStreamClient.doScanOneShot(
+                                                                        this,
+                                                                        table,
+                                                                        startKey,
+                                                                        endKey,
+                                                                        limit,
+                                                                        scanType,
+                                                                        query)))
+                            .get();
+
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> scanIterator(String table, int codeFrom, int codeTo,
+                                                int scanType, byte[] query) {
+        // TODO: Should be changed when we start using the hashcode as the partitionId.
+        if (log.isDebugEnabled()) {
+            log.debug("scanIterator-scanType: {}", scanType);
+        }
+        return GrpcKvIteratorImpl.of(this,
+                                     this.storeStreamClient.doScan(this, table
+                                             , HgOwnerKey.newEmpty().codeToKey(codeFrom)
+                                             , HgOwnerKey.newEmpty().codeToKey(codeTo)
+                                             , HgStoreClientConst.NO_LIMIT
+                                             , HgKvStore.SCAN_PREFIX_BEGIN |
+                                               HgKvStore.SCAN_HASHCODE | scanType
+                                             , query
+                                     )
+        );
+    }
+
+    @Override
+    public List<HgKvIterator<HgKvEntry>> scanBatch(HgScanQuery scanQuery) {
+        return Collections.singletonList(GrpcKvIteratorImpl.of(this,
+                                                               this.storeStreamClient.doBatchScan(
+                                                                       this, scanQuery)
+        ));
+    }
+
+    @Override
+    public KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch2(HgScanQuery scanQuery) {
+        throw new RuntimeException("not implemented");
+    }
+
+    @Override
+    public KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch3(HgScanQuery scanQuery,
+                                                                   KvCloseableIterator iterator) {
+        return this.storeStreamClient.doBatchScan3(this, scanQuery, iterator);
+    }
+
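+    // True when the scan is unlimited or its limit exceeds one page; callers use this to
+    // choose between the streaming scan and the one-shot scan.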
+    private Supplier<Boolean> getSwitcherSupplier(long limit) {
+        return () -> limit <= 0 || limit > hgStoreClientConfig.getNetKvScannerPageSize();
+    }
+
+    @Override
+    public String toString() {
+        return "storeNodeSession: {" + storeNode + ", graphName: \"" + graphName + "\"}";
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java
new file mode 100644
index 0000000..794a7c1
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.getHeader;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.grpc.common.GraphMethod;
+import org.apache.hugegraph.store.grpc.common.ScanMethod;
+import org.apache.hugegraph.store.grpc.common.TableMethod;
+import org.apache.hugegraph.store.grpc.session.Agg;
+import org.apache.hugegraph.store.grpc.session.BatchEntry;
+import org.apache.hugegraph.store.grpc.session.BatchGetReq;
+import org.apache.hugegraph.store.grpc.session.BatchReq;
+import org.apache.hugegraph.store.grpc.session.BatchWriteReq;
+import org.apache.hugegraph.store.grpc.session.CleanReq;
+import org.apache.hugegraph.store.grpc.session.FeedbackRes;
+import org.apache.hugegraph.store.grpc.session.GetReq;
+import org.apache.hugegraph.store.grpc.session.GraphReq;
+import org.apache.hugegraph.store.grpc.session.HgStoreSessionGrpc;
+import org.apache.hugegraph.store.grpc.session.HgStoreSessionGrpc.HgStoreSessionBlockingStub;
+import org.apache.hugegraph.store.grpc.session.TableReq;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+
+import io.grpc.Deadline;
+import io.grpc.ManagedChannel;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/11/18
+ *
+ * @version 0.5.0
+ */
+@Slf4j
+@ThreadSafe
+class GrpcStoreSessionClient extends AbstractGrpcClient {
+
+    @Override
+    public HgStoreSessionBlockingStub getBlockingStub(ManagedChannel channel) {
+        return HgStoreSessionGrpc.newBlockingStub(channel);
+    }
+
+    private HgStoreSessionBlockingStub getBlockingStub(HgStoreNodeSession nodeSession) {
+        HgStoreSessionBlockingStub stub =
+                (HgStoreSessionBlockingStub) getBlockingStub(
+                        nodeSession.getStoreNode().getAddress());
+        return stub;
+    }
+
+    FeedbackRes doGet(HgStoreNodeSession nodeSession, String table, HgOwnerKey ownerKey) {
+        GetReq req = GetReq.newBuilder()
+                           .setHeader(GrpcUtil.getHeader(nodeSession))
+                           .setTk(GrpcUtil.toTk(table, ownerKey))
+                           .build();
+        if (log.isDebugEnabled()) {
+            log.debug("doGet: {}-{}-{}-{}", nodeSession, table, ownerKey, req);
+        }
+        return this.getBlockingStub(nodeSession).get2(req);
+    }
+
+    FeedbackRes doClean(HgStoreNodeSession nodeSession, int partId) {
+        return this.getBlockingStub(nodeSession)
+                   .clean(CleanReq.newBuilder()
+                                  .setHeader(GrpcUtil.getHeader(nodeSession))
+                                  .setPartition(partId)
+                                  .build()
+                   );
+    }
+
+    FeedbackRes doBatchGet(HgStoreNodeSession nodeSession, String table, List<HgOwnerKey> keyList) {
+        BatchGetReq.Builder builder = BatchGetReq.newBuilder();
+        builder.setHeader(GrpcUtil.getHeader(nodeSession)).setTable(table);
+
+        for (HgOwnerKey key : keyList) {
+            builder.addKey(GrpcUtil.toKey(key));
+        }
+
+        if (log.isDebugEnabled()) {
+            log.debug("batchGet2: {}-{}-{}-{}", nodeSession, table, keyList, builder.build());
+        }
+        return this.getBlockingStub(nodeSession).batchGet2(builder.build());
+
+    }
+
+    FeedbackRes doBatch(HgStoreNodeSession nodeSession, String batchId, List<BatchEntry> entries) {
+        BatchWriteReq.Builder writeReq = BatchWriteReq.newBuilder();
+        writeReq.addAllEntry(entries);
+        return this.getBlockingStub(nodeSession)
+                   .batch(BatchReq.newBuilder()
+                                  .setHeader(GrpcUtil.getHeader(nodeSession))
+                                  .setWriteReq(writeReq)
+                                  .setBatchId(batchId)
+                                  .build()
+                   );
+    }
+
+    FeedbackRes doTable(HgStoreNodeSession nodeSession, String table, TableMethod method) {
+        return this.getBlockingStub(nodeSession)
+                   .table(TableReq.newBuilder()
+                                  .setHeader(GrpcUtil.getHeader(nodeSession))
+                                  .setTableName(table)
+                                  .setMethod(method)
+                                  .build()
+                   );
+    }
+
+    FeedbackRes doGraph(HgStoreNodeSession nodeSession, String graph, GraphMethod method) {
+        return this.getBlockingStub(nodeSession)
+                   .graph(GraphReq.newBuilder()
+                                  .setHeader(GrpcUtil.getHeader(nodeSession))
+                                  .setGraphName(graph)
+                                  .setMethod(method)
+                                  .build()
+                   );
+    }
+
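+    // Counts all entries of a table via a full scan; a generous 24-hour deadline is used
+    // because counting a large table can take a long time.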
+    public long count(HgStoreNodeSession nodeSession, String table) {
+        Agg agg = this.getBlockingStub(nodeSession).withDeadline(Deadline.after(24, TimeUnit.HOURS))
+                      .count(ScanStreamReq.newBuilder()
+                                          .setHeader(getHeader(nodeSession))
+                                          .setTable(table)
+                                          .setMethod(ScanMethod.ALL)
+                                          .build()
+                      );
+        return agg.getCount();
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStateClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStateClient.java
new file mode 100644
index 0000000..be20d06
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStateClient.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.store.grpc.state.HgStoreStateGrpc;
+import org.apache.hugegraph.store.grpc.state.HgStoreStateGrpc.HgStoreStateBlockingStub;
+import org.apache.hugegraph.store.grpc.state.ScanState;
+import org.apache.hugegraph.store.grpc.state.SubStateReq;
+
+import io.grpc.ManagedChannel;
+import io.grpc.stub.AbstractBlockingStub;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Client for querying the scan state of every active store (resolved via PD).
+ */
+@Slf4j
+@ThreadSafe
+public class GrpcStoreStateClient extends AbstractGrpcClient {
+
+    private final PDConfig pdConfig;
+    private final PDClient pdClient;
+
+    public GrpcStoreStateClient(PDConfig pdConfig) {
+        this.pdConfig = pdConfig;
+        pdClient = PDClient.create(this.pdConfig);
+    }
+
+    public Set<ScanState> getScanState() throws Exception {
+        List<Metapb.Store> activeStores = pdClient.getActiveStores();
+        return activeStores.parallelStream().map(node -> {
+            String address = node.getAddress();
+            HgStoreStateBlockingStub stub = (HgStoreStateBlockingStub) getBlockingStub(address);
+            SubStateReq req = SubStateReq.newBuilder().build();
+            return stub.getScanState(req);
+        }).collect(Collectors.toSet());
+    }
+
+    @Override
+    public AbstractBlockingStub getBlockingStub(ManagedChannel channel) {
+        HgStoreStateBlockingStub stub;
+        stub = HgStoreStateGrpc.newBlockingStub(channel);
+        return stub;
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStreamClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStreamClient.java
new file mode 100644
index 0000000..93cfe7a
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStreamClient.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamBlockingStub;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamStub;
+
+import io.grpc.ManagedChannel;
+import io.grpc.stub.AbstractAsyncStub;
+import io.grpc.stub.AbstractBlockingStub;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/10/19
+ *
+ * @version 1.1.1 added synchronized in getChannel.
+ */
+@Slf4j
+@ThreadSafe
+public class GrpcStoreStreamClient extends AbstractGrpcClient {
+
+    public HgStoreStreamStub getStub(HgStoreNodeSession nodeSession) {
+        return (HgStoreStreamStub) getAsyncStub(nodeSession.getStoreNode().getAddress());
+    }
+
+    @Override
+    public AbstractAsyncStub getAsyncStub(ManagedChannel channel) {
+        return HgStoreStreamGrpc.newStub(channel);
+    }
+
+    private HgStoreStreamBlockingStub getBlockingStub(HgStoreNodeSession nodeSession) {
+        return (HgStoreStreamBlockingStub) getBlockingStub(nodeSession.getStoreNode().getAddress());
+    }
+
+    @Override
+    public AbstractBlockingStub getBlockingStub(ManagedChannel channel) {
+        return HgStoreStreamGrpc.newBlockingStub(channel);
+    }
+
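+    // The doScanOneShot variants fetch the whole result with the blocking stub in a single
+    // round trip, while the doScan variants stream the result page by page over the async stub.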
+    KvCloseableIterator<Kv> doScanOneShot(HgStoreNodeSession nodeSession, String table, long limit,
+                                          byte[] query) {
+        return KvOneShotScanner.scanAll(nodeSession
+                , this.getBlockingStub(nodeSession)
+                , table
+                , limit
+                , query
+        );
+    }
+
+    KvCloseableIterator<Kv> doScanOneShot(HgStoreNodeSession nodeSession, String table,
+                                          long limit) {
+        return KvOneShotScanner.scanAll(nodeSession
+                , this.getBlockingStub(nodeSession)
+                , table
+                , limit
+                , null
+        );
+    }
+
+    KvCloseableIterator<Kv> doScanOneShot(HgStoreNodeSession nodeSession, String table,
+                                          HgOwnerKey prefix, long limit) {
+        return KvOneShotScanner.scanPrefix(nodeSession
+                , this.getBlockingStub(nodeSession)
+                , table
+                , prefix
+                , limit
+                , null
+        );
+    }
+
+    KvCloseableIterator<Kv> doScanOneShot(HgStoreNodeSession nodeSession, String table,
+                                          HgOwnerKey prefix, long limit,
+                                          byte[] query) {
+        return KvOneShotScanner.scanPrefix(nodeSession
+                , this.getBlockingStub(nodeSession)
+                , table
+                , prefix
+                , limit
+                , query
+        );
+    }
+
+    KvCloseableIterator<Kv> doScanOneShot(HgStoreNodeSession nodeSession, String table,
+                                          HgOwnerKey startKey,
+                                          HgOwnerKey endKey
+            , long limit
+            , int scanType
+            , byte[] query) {
+
+        return KvOneShotScanner.scanRange(nodeSession
+                , this.getBlockingStub(nodeSession)
+                , table
+                , startKey
+                , endKey
+                , limit
+                , scanType
+                , query
+        );
+    }
+
+    KvCloseableIterator<Kv> doScan(HgStoreNodeSession nodeSession
+            , String table
+            , long limit
+            , byte[] query) {
+
+        return KvPageScanner.scanAll(nodeSession
+                , this.getStub(nodeSession)
+                , table
+                , limit
+                , query
+        );
+    }
+
+    KvCloseableIterator<Kv> doScan(HgStoreNodeSession nodeSession
+            , String table
+            , long limit) {
+
+        return KvPageScanner.scanAll(nodeSession
+                , this.getStub(nodeSession)
+                , table
+                , limit
+                , null
+        );
+    }
+
+    KvCloseableIterator<Kv> doScan(HgStoreNodeSession nodeSession
+            , String table
+            , HgOwnerKey prefix
+            , long limit) {
+
+        return KvPageScanner.scanPrefix(nodeSession
+                , this.getStub(nodeSession)
+                , table
+                , prefix
+                , limit
+                , null
+        );
+    }
+
+    KvCloseableIterator<Kv> doScan(HgStoreNodeSession nodeSession
+            , String table
+            , HgOwnerKey prefix
+            , long limit
+            , byte[] query) {
+
+        return KvPageScanner.scanPrefix(nodeSession
+                , this.getStub(nodeSession)
+                , table
+                , prefix
+                , limit
+                , query
+        );
+    }
+
+    KvCloseableIterator<Kv> doScan(HgStoreNodeSession nodeSession
+            , String table
+            , HgOwnerKey startKey
+            , HgOwnerKey endKey
+            , long limit
+            , int scanType
+            , byte[] query) {
+
+        return KvPageScanner.scanRange(nodeSession
+                , this.getStub(nodeSession)
+                , table
+                , startKey
+                , endKey
+                , limit
+                , scanType
+                , query
+        );
+    }
+
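+    // Returns a single iterator backed by one bidirectional scanBatch stream (see KvBatchScanner5).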
+    KvCloseableIterator<Kv> doBatchScan(HgStoreNodeSession nodeSession, HgScanQuery scanQuery) {
+        return KvBatchScanner5.scan(nodeSession, this.getStub(nodeSession), scanQuery);
+    }
+
+    // Returns multiple small iterators so that the upper layer can process them in parallel
+    KvCloseableIterator<HgKvIterator<HgKvEntry>> doBatchScan3(HgStoreNodeSession nodeSession,
+                                                              HgScanQuery scanQuery,
+                                                              KvCloseableIterator iterator) {
+        KvBatchScanner.scan(this.getStub(nodeSession), nodeSession.getGraphName(), scanQuery,
+                            iterator);
+        return iterator;
+    }
+
+    KvCloseableIterator<Kv> doBatchScanOneShot(HgStoreNodeSession nodeSession,
+                                               HgScanQuery scanQuery) {
+        return KvBatchOneShotScanner.scan(nodeSession, this.getBlockingStub(nodeSession),
+                                          scanQuery);
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcUtil.java
new file mode 100644
index 0000000..2191f7a
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcUtil.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.grpc.common.Header;
+import org.apache.hugegraph.store.grpc.common.Key;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.common.Tk;
+import org.apache.hugegraph.store.grpc.common.Tkv;
+import org.apache.hugegraph.store.grpc.common.Tp;
+import org.apache.hugegraph.store.grpc.common.Tse;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+
+/**
+ * 2022/1/19
+ */
+final class GrpcUtil {
+
+    private static final ThreadLocal<Key.Builder> keyBuilder = new ThreadLocal<Key.Builder>();
+
+    static Header getHeader(HgStoreNodeSession nodeSession) {
+        return Header.newBuilder()
+                     .setGraph(nodeSession.getGraphName())
+                     .build();
+    }
+
+    static Tk toTk(String table, HgOwnerKey ownerKey) {
+        return Tk.newBuilder()
+                 .setTable(table)
+                 .setKey(ByteString.copyFrom(ownerKey.getKey()))
+                 .setCode(ownerKey.getKeyCode())
+                 .build();
+    }
+
+    static Key.Builder getOwnerKeyBuilder() {
+        Key.Builder builder = keyBuilder.get();
+        if (builder == null) {
+            builder = Key.newBuilder();
+            // TODO: thread-local builder; decide when it should be removed (never cleaned up at present)
+            keyBuilder.set(builder);
+        }
+        return builder;
+    }
+
+    static Key toKey(HgOwnerKey ownerKey, Key.Builder builder) {
+        if (ownerKey == null) {
+            return null;
+        }
+        return builder
+                .setKey(ByteString.copyFrom(ownerKey.getKey()))
+                .setCode(ownerKey.getKeyCode())
+                .build();
+    }
+
+    static Key toKey(HgOwnerKey ownerKey) {
+        if (ownerKey == null) {
+            return null;
+        }
+        return toKey(ownerKey, getOwnerKeyBuilder());
+    }
+
+    static Tkv toTkv(String table, HgOwnerKey ownerKey, byte[] value) {
+        return Tkv.newBuilder()
+                  .setTable(table)
+                  .setKey(ByteString.copyFrom(ownerKey.getKey()))
+                  .setValue(ByteString.copyFrom(value))
+                  .setCode(ownerKey.getKeyCode())
+                  .build();
+    }
+
+    static Tp toTp(String table, HgOwnerKey ownerKey) {
+        return Tp.newBuilder()
+                 .setTable(table)
+                 .setPrefix(ByteString.copyFrom(ownerKey.getKey()))
+                 .setCode(ownerKey.getKeyCode())
+                 .build();
+    }
+
+    static Tse toTse(String table, HgOwnerKey startKey, HgOwnerKey endKey) {
+        return Tse.newBuilder()
+                  .setTable(table)
+                  .setStart(toKey(startKey))
+                  .setEnd(toKey(endKey))
+                  .build();
+
+    }
+
+    static List<HgKvEntry> toList(List<Kv> kvList) {
+        if (kvList == null || kvList.isEmpty()) {
+            return HgStoreClientConst.EMPTY_LIST;
+        }
+
+        Iterator<Kv> iter = kvList.iterator();
+        List<HgKvEntry> resList = new ArrayList<>(kvList.size());
+
+        while (iter.hasNext()) {
+            Kv entry = iter.next();
+            resList.add(new GrpcKvEntryImpl(entry.getKey().toByteArray(),
+                                            entry.getValue().toByteArray(), entry.getCode()));
+        }
+
+        return resList;
+    }
+
+    static StatusRuntimeException toErr(String msg) {
+        return new StatusRuntimeException(Status.UNKNOWN.withDescription(msg));
+    }
+
+    static ByteString toBs(byte[] bytes) {
+        return ByteString.copyFrom((bytes != null) ? bytes : HgStoreClientConst.EMPTY_BYTES);
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchOneShotScanner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchOneShotScanner.java
new file mode 100644
index 0000000..166e091
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchOneShotScanner.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.EMPTY_POSITION;
+import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.createQueryReq;
+import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.getHeader;
+
+import java.util.Iterator;
+import java.util.List;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgPageSize;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.HgSeekAble;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/04/08
+ */
+@Slf4j
+@NotThreadSafe
+class KvBatchOneShotScanner implements KvCloseableIterator<Kv>, HgPageSize, HgSeekAble {
+
+    private final HgStoreNodeSession nodeSession;
+    private final HgStoreStreamGrpc.HgStoreStreamBlockingStub stub;
+    private final HgScanQuery scanQuery;
+
+    private Iterator<Kv> iterator;
+    private List<Kv> list = null;
+
+    private KvBatchOneShotScanner(HgStoreNodeSession nodeSession,
+                                  HgStoreStreamGrpc.HgStoreStreamBlockingStub stub,
+                                  HgScanQuery scanQuery) {
+
+        this.nodeSession = nodeSession;
+        this.stub = stub;
+        this.scanQuery = scanQuery;
+    }
+
+    public static KvCloseableIterator scan(HgStoreNodeSession nodeSession,
+                                           HgStoreStreamGrpc.HgStoreStreamBlockingStub stub,
+                                           HgScanQuery scanQuery) {
+
+        return new KvBatchOneShotScanner(nodeSession, stub, scanQuery);
+    }
+
+    private ScanStreamBatchReq createReq() {
+        return ScanStreamBatchReq.newBuilder()
+                                 .setHeader(getHeader(this.nodeSession))
+                                 .setQueryRequest(createQueryReq(this.scanQuery, Integer.MAX_VALUE))
+                                 .build();
+    }
+
+    private Iterator<Kv> createIterator() {
+        this.list = this.stub.scanBatchOneShot(this.createReq()).getDataList();
+        return this.list.iterator();
+    }
+
+    /*** Iterator ***/
+    @Override
+    public boolean hasNext() {
+        if (this.iterator == null) {
+            this.iterator = this.createIterator();
+        }
+        return this.iterator.hasNext();
+    }
+
+    @Override
+    public Kv next() {
+        if (this.iterator == null) {
+            this.iterator = this.createIterator();
+        }
+        return this.iterator.next();
+    }
+
+    @Override
+    public long getPageSize() {
+        return Integer.MAX_VALUE;
+    }
+
+    @Override
+    public boolean isPageEmpty() {
+        return !this.iterator.hasNext();
+    }
+
+    @Override
+    public byte[] position() {
+        //TODO: to implement
+        return EMPTY_POSITION;
+    }
+
+    @Override
+    public void seek(byte[] position) {
+        //TODO: to implement
+    }
+
+    @Override
+    public void close() {
+        //Nothing to do
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner.java
new file mode 100644
index 0000000..dce6563
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.createQueryReq;
+
+import java.io.Closeable;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.function.BiFunction;
+import java.util.function.Supplier;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvOrderedIterator;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.buffer.KVByteBuffer;
+import org.apache.hugegraph.store.client.util.PropertyUtil;
+import org.apache.hugegraph.store.grpc.common.Header;
+import org.apache.hugegraph.store.grpc.common.ScanOrderType;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc;
+import org.apache.hugegraph.store.grpc.stream.KvStream;
+import org.apache.hugegraph.store.grpc.stream.ScanReceiptRequest;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Batch streaming scan client implementation.
+ * <p>
+ * created on 2022/07/23
+ *
+ * @version 3.0.0
+ */
+@Slf4j
+@NotThreadSafe
+public class KvBatchScanner implements Closeable {
+
+    static final Supplier<HgKvIterator<HgKvEntry>> NO_DATA = () -> null;
+    static int maxTaskSizePerStore = PropertyUtil.getInt("net.kv.scanner.task.size", 8);
+    private final StreamObserver<ScanStreamBatchReq> sender; // request sender
+    private final KvBatchScannerMerger notifier; // data arrival notifier
+    private final String graphName; // graph name
+    private final HgScanQuery scanQuery;
+    private final ScanReceiptRequest.Builder responseBuilder = ScanReceiptRequest.newBuilder();
+    private final KvBatchReceiver receiver;
+    volatile int currentSeqNo = 0;
+    private volatile boolean running;
+
+    public KvBatchScanner(
+            HgStoreStreamGrpc.HgStoreStreamStub stub,
+            String graphName,
+            HgScanQuery scanQuery,
+            KvCloseableIterator iterator) {
+
+        this.graphName = graphName;
+        this.notifier = (KvBatchScannerMerger) iterator;
+        this.notifier.registerScanner(this);
+        this.running = true;
+        this.scanQuery = scanQuery;
+        receiver =
+                new KvBatchReceiver(this, scanQuery.getOrderType() == ScanOrderType.ORDER_STRICT);
+        sender = stub.scanBatch2(receiver);
+        sendQuery(this.scanQuery); // send the initial query request
+    }
+
+    /**
+     * Build the streaming scan iterator.
+     * The scanQuery is split into multiple sub-queries, each served by its own streaming
+     * request, to improve concurrency on the store side.
+     *
+     * @param scanQuery scanQuery
+     * @param handler   task handler
+     * @return data merger iterator
+     */
+    public static KvCloseableIterator ofMerger(
+            HgScanQuery scanQuery, BiFunction<HgScanQuery, KvCloseableIterator, Boolean> handler) {
+        KvBatchScannerMerger merger;
+        if (scanQuery.getOrderType() == ScanOrderType.ORDER_STRICT) {
+            merger = new KvBatchScannerMerger.SortedScannerMerger(
+                    new TaskSplitter(scanQuery, handler));
+        } else {
+            merger = new KvBatchScannerMerger(new TaskSplitter(scanQuery, handler));
+        }
+        merger.startTask();
+        return merger;
+    }
+
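+    // Typical flow (a sketch): the caller obtains a merger via ofMerger(query, handler); the
+    // handler is invoked with each split sub-query and is expected to open a stream for it,
+    // e.g. by calling scan(stub, graphName, subQuery, merger); the merger then interleaves
+    // the results from all scanners for the consumer.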
+    public static void scan(
+            HgStoreStreamGrpc.HgStoreStreamStub stub,
+            String graphName,
+            HgScanQuery scanQuery,
+            KvCloseableIterator iterator) {
+        new KvBatchScanner(stub, graphName, scanQuery, iterator);
+    }
+
+    /**
+     * Send the query request.
+     *
+     * @param query scan query
+     */
+    public void sendQuery(HgScanQuery query) {
+        synchronized (this.sender) {
+            if (running) {
+                this.sender.onNext(
+                        ScanStreamBatchReq.newBuilder()
+                                          .setHeader(
+                                                  Header.newBuilder().setGraph(graphName).build())
+                                          .setQueryRequest(createQueryReq(query, 0))
+                                          .build());
+            }
+        }
+    }
+
+    /**
+     * Send an acknowledgement (receipt) for received data.
+     */
+    public void sendResponse() {
+        try {
+            sendResponse(currentSeqNo);
+        } catch (Exception e) {
+            log.error("exception", e);
+        }
+    }
+
+    public void sendResponse(int seqNo) {
+        currentSeqNo = seqNo;
+        synchronized (this.sender) {
+            if (running) {
+                this.sender.onNext(
+                        ScanStreamBatchReq.newBuilder()
+                                          .setHeader(
+                                                  Header.newBuilder().setGraph(graphName).build())
+                                          .setReceiptRequest(
+                                                  responseBuilder.setTimes(seqNo).build())
+                                          .build());
+            }
+        }
+    }
+
+    public void dataArrived(Supplier<HgKvIterator<HgKvEntry>> supplier) throws
+                                                                        InterruptedException {
+        notifier.dataArrived(this, supplier);
+    }
+
+    /**
+     * Data reception has finished.
+     */
+    public void dataComplete() {
+        close();
+    }
+
+    // the stream has been closed
+    @Override
+    public void close() {
+        try {
+            if (notifier.unregisterScanner(this) < 0) {
+                notifier.dataArrived(this, NO_DATA); // task finished, wake up the waiting queue
+            }
+        } catch (InterruptedException e) {
+            log.error("exception ", e);
+        }
+        synchronized (this.sender) {
+            try {
+                if (running) {
+                    sender.onCompleted();
+                }
+            } catch (Exception e) {
+                // ignore: the stream may already have been closed by the other side
+            }
+            running = false;
+        }
+    }
+
+    /**
+     * Task splitter.
+     */
+    static class TaskSplitter {
+
+        final HgScanQuery scanQuery;
+        final BiFunction<HgScanQuery, KvCloseableIterator, Boolean> taskHandler;
+        private KvBatchScannerMerger notifier;
+        private Iterator<HgOwnerKey> prefixItr;
+        private int maxTaskSize = 0; // maximum number of parallel tasks
+        private int maxBatchSize = PropertyUtil.getInt("net.kv.scanner.batch.size", 1000);
+        // maximum number of keys per batch
+        private volatile boolean finished = false;
+        private volatile boolean splitting = false;
+        private volatile int nextKeySerialNo = 1;
+
+        public TaskSplitter(HgScanQuery scanQuery,
+                            BiFunction<HgScanQuery, KvCloseableIterator, Boolean> handler) {
+            this.scanQuery = scanQuery;
+            this.taskHandler = handler;
+            if (scanQuery.getScanMethod() == HgScanQuery.ScanMethod.PREFIX) {
+                if (scanQuery.getPrefixItr() != null) {
+                    prefixItr = scanQuery.getPrefixItr();
+                } else {
+                    prefixItr = scanQuery.getPrefixList().listIterator();
+                }
+            }
+        }
+
+        public void setNotifier(KvBatchScannerMerger notifier) {
+            this.notifier = notifier;
+        }
+
+        public boolean isFinished() {
+            return finished;
+        }
+
+        /**
+         * Estimate the maximum number of tasks.
+         */
+        private void evaluateMaxTaskSize() {
+            if (maxTaskSize == 0) { // derive the store count from the first batch of tasks, then compute the max task count
+                if (scanQuery.getOrderType() == ScanOrderType.ORDER_STRICT) {
+                    maxTaskSize = 1; // vertex-ordered: one stream per store; new streams start only after all store streams finish
+                } else {
+                    maxTaskSize = this.notifier.getScannerCount() * maxTaskSizePerStore;
+                }
+                maxBatchSize = this.notifier.getScannerCount() * maxBatchSize; // at most 1000 keys per store
+
+                /*
+                 * Start only one stream when the limit is relatively small, to save network bandwidth.
+                 */
+                if (scanQuery.getLimit() < maxBatchSize * 30L) {
+                    maxTaskSize = 1;
+                }
+            }
+        }
+
+        /**
+         * Split the task into multiple gRPC requests.
+         */
+        public void splitTask() {
+            if (this.finished || this.splitting) {
+                return;
+            }
+            synchronized (this) {
+                if (this.finished) {
+                    return;
+                }
+                this.splitting = true;
+                if (scanQuery.getScanMethod() == HgScanQuery.ScanMethod.PREFIX) {
+                    if (prefixItr.hasNext() &&
+                        (maxTaskSize == 0 || notifier.getScannerCount() < maxTaskSize)) {
+                        List<HgOwnerKey> keys = new ArrayList<>(maxBatchSize);
+                        for (int i = 0; i < maxBatchSize && prefixItr.hasNext(); i++) {
+                            keys.add(prefixItr.next().setSerialNo(nextKeySerialNo++));
+                        }
+                        taskHandler.apply(
+                                HgScanQuery.prefixOf(scanQuery.getTable(), keys,
+                                                     scanQuery.getOrderType()), this.notifier);
+                        // estimate the maximum task count
+                        evaluateMaxTaskSize();
+                        if (this.notifier.getScannerCount() < this.maxTaskSize) {
+                            splitTask(); // below the max task count, keep splitting
+                        }
+                    }
+                    this.finished = !prefixItr.hasNext();
+                } else {
+                    taskHandler.apply(scanQuery, this.notifier);
+                    this.finished = true;
+                }
+                this.splitting = false;
+            }
+        }
+
+        public synchronized void close() {
+            finished = true;
+        }
+    }
+
+    /**
+     * Scan result receiver.
+     */
+    static class KvBatchReceiver implements StreamObserver<KvStream> {
+
+        KvBatchScanner scanner;
+        boolean sortByVertex;
+
+        KvBatchReceiver(KvBatchScanner scanner, boolean sortByVertex) {
+            this.scanner = scanner;
+            this.sortByVertex = sortByVertex;
+        }
+
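+        // Each received KvStream is wrapped in a lazy supplier: the receipt for its seqNo is
+        // only sent, and the buffer only turned into an iterator, when the merger consumes it.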
+        @Override
+        public void onNext(KvStream value) {
+            try {
+                ByteBuffer buffer = value.getStream();
+                int seqNo = value.getSeqNo();
+                boolean isOver = value.getOver();
+                scanner.dataArrived(
+                        () -> {
+                            scanner.sendResponse(seqNo);
+                            if (isOver) {
+                                scanner.dataComplete();
+                            }
+                            return new KVBytesIterator(buffer, sortByVertex, scanner);
+                        });
+            } catch (InterruptedException e) {
+                close();
+                log.error("exception ", e);
+                throw new RuntimeException(e);
+            }
+        }
+
+        @Override
+        public void onError(Throwable t) {
+            log.error("exception ", t);
+            close();
+        }
+
+        @Override
+        public void onCompleted() {
+            close();
+        }
+
+        private void close() {
+            if (scanner != null) {
+                scanner.close();
+            }
+        }
+    }
+
+    static class KVBytesIterator implements HgKvOrderedIterator<HgKvEntry> {
+
+        private final KvBatchScanner scanner;
+        KVByteBuffer buffer;
+        HgKvEntry entry;
+        // sequence no
+        int sn;
+        boolean hasSN;
+
+        public KVBytesIterator(ByteBuffer buffer, boolean hasNo, KvBatchScanner scanner) {
+            this.buffer = new KVByteBuffer(buffer);
+            this.hasSN = hasNo;
+            this.scanner = scanner;
+        }
+
+        @Override
+        public void close() {
+            // this.scanner.close();
+        }
+
+        @Override
+        public byte[] key() {
+            return entry.key();
+        }
+
+        @Override
+        public byte[] value() {
+            return entry.value();
+        }
+
+        @Override
+        public byte[] position() {
+            return new byte[0];
+        }
+
+        @Override
+        public void seek(byte[] position) {
+            throw new RuntimeException("not implemented");
+        }
+
+        @Override
+        public boolean hasNext() {
+            return buffer.hasRemaining();
+        }
+
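+        // Entry layout in the buffer: an optional int sequence number (when ordered results
+        // are requested), followed by the key bytes and the value bytes read via
+        // KVByteBuffer.getBytes().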
+        @Override
+        public HgKvEntry next() {
+            if (hasSN) {
+                sn = buffer.getInt();
+            }
+            entry = new GrpcKvEntryImpl(buffer.getBytes(), buffer.getBytes(), 0);
+            return entry;
+        }
+
+        @Override
+        public long getSequence() {
+            return sn;
+        }
+
+        @Override
+        public int compareTo(HgKvOrderedIterator o) {
+            return Long.compare(this.getSequence(), o.getSequence());
+        }
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner5.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner5.java
new file mode 100644
index 0000000..2ee91f6
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner5.java
@@ -0,0 +1,454 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Supplier;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgPageSize;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+import org.apache.hugegraph.store.client.util.Base58;
+import org.apache.hugegraph.store.client.util.HgStoreClientConfig;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc;
+import org.apache.hugegraph.store.grpc.stream.KvPageRes;
+import org.apache.hugegraph.store.grpc.stream.ScanCancelRequest;
+import org.apache.hugegraph.store.grpc.stream.ScanReceiptRequest;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/03/23
+ *
+ * @version 5.0.0
+ */
+@Slf4j
+@NotThreadSafe
+class KvBatchScanner5 {
+
+    private final static HgStoreClientConfig storeClientConfig = HgStoreClientConfig.of();
+    //private final static int HAVE_NEXT_TIMEOUT_SECONDS = storeClientConfig
+    // .getNetKvScannerHaveNextTimeout();
+    private final static int HAVE_NEXT_TIMEOUT_SECONDS = 60;
+    private final static long PAGE_SIZE = storeClientConfig.getNetKvScannerPageSize();
+
+    public static KvCloseableIterator scan(HgStoreNodeSession nodeSession,
+                                           HgStoreStreamGrpc.HgStoreStreamStub stub,
+                                           HgScanQuery scanQuery) {
+        return new OrderConsumer(new OrderBroker(stub, scanQuery, nodeSession));
+    }
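+
+    // Internal roles: OrderBroker drives the bidirectional scanBatch stream and acknowledges
+    // received pages; its OrderAgent observer pushes each page into the OrderKeeper's blocking
+    // queue; OrderConsumer drains the queue and exposes the data as a KvCloseableIterator.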
+
+    private enum OrderState {
+        NEW(0),
+        WORKING(1),
+        COMPLETED(10);
+
+        int value;
+
+        OrderState(int value) {
+            this.value = value;
+        }
+    }
+
+    /*** Broker ***/
+    private static class OrderBroker {
+
+        public final OrderKeeper keeper = new OrderKeeper();
+        private final HgScanQuery scanQuery;
+        private final StreamObserver<ScanStreamBatchReq> requestObserver;
+        private final ScanStreamBatchReq.Builder reqBuilder;
+        private final ReentrantLock senderLock = new ReentrantLock();
+        private final AtomicBoolean serverFinished = new AtomicBoolean();
+        private final AtomicBoolean clientFinished = new AtomicBoolean();
+        private final ScanReceiptRequest.Builder receiptReqBuilder =
+                ScanReceiptRequest.newBuilder();
+        private final ScanCancelRequest cancelReq = ScanCancelRequest.newBuilder().build();
+        private final HgStoreNodeSession nodeSession;
+        private final OrderAgent agent;
+        private final AtomicLong receivedCount = new AtomicLong();
+        private final AtomicInteger receivedLastTimes = new AtomicInteger();
+        private final BlockingQueue<Integer> timesQueue = new LinkedBlockingQueue();
+        String brokerId = "";
+        private OrderState state = OrderState.NEW;
+
+        OrderBroker(HgStoreStreamGrpc.HgStoreStreamStub stub,
+                    HgScanQuery scanQuery,
+                    HgStoreNodeSession nodeSession) {
+
+            if (log.isDebugEnabled()) {
+                if (scanQuery.getPrefixList() != null && scanQuery.getPrefixList().size() > 0) {
+                    brokerId = Base58.encode(scanQuery.getPrefixList().get(0).getKey());
+
+                    log.debug(
+                            "[ANALYSIS START] [{}] firstKey: {}, keyLength: {}, table: {}, node: {}"
+                            , brokerId
+                            , scanQuery.getPrefixList().get(0)
+                            , scanQuery.getPrefixList().size()
+                            , scanQuery.getTable()
+                            , nodeSession.getStoreNode().getAddress());
+                }
+            }
+
+            this.scanQuery = scanQuery;
+            this.reqBuilder = KvBatchUtil.getRequestBuilder(nodeSession);
+            this.nodeSession = nodeSession;
+            this.agent = new OrderAgent(brokerId);
+            this.requestObserver = stub.scanBatch(agent);
+
+        }
+
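+        // The first call sends the query itself; subsequent calls acknowledge the previous
+        // page; either way the call then blocks in the keeper until the next page (or the
+        // end of the stream) arrives.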
+        List<Kv> oneMore() {
+
+            if (this.state == OrderState.NEW) {
+                synchronized (this.state) {
+                    if (this.state == OrderState.NEW) {
+                        this.makeADeal();
+                        this.state = OrderState.WORKING;
+                    }
+                }
+            } else {
+                this.sendReceipt();
+            }
+
+            return this.keeper.pickUp();
+        }
+
+        void receipt(int times) {
+            this.timesQueue.offer(times);
+            receivedLastTimes.set(times);
+        }
+
+        void sendReceipt() {
+            Integer buf = this.timesQueue.poll();
+
+            if (buf == null) {
+                buf = this.receivedLastTimes.get();
+            }
+
+            AtomicInteger timesBuf = new AtomicInteger(buf);
+
+            if (!this.clientFinished.get()) {
+                this.send(() ->
+                                  getReqBuilder().setReceiptRequest(
+                                                         this.receiptReqBuilder.setTimes(timesBuf.get()).build())
+                                                 .build()
+                );
+            }
+        }
+
+        private void makeADeal() {
+            this.send(() -> getReqBuilder()
+                    .setQueryRequest(KvBatchUtil.createQueryReq(scanQuery, PAGE_SIZE)).build()
+            );
+        }
+
+        private void finish(long tookAmt) {
+            this.clientFinished.set(true);
+            if (log.isDebugEnabled()) {
+                log.debug("[ANALYSIS END] [{}] times: {}, received: {}, took: {}"
+                        , this.brokerId
+                        , this.receivedLastTimes.get()
+                        , this.receivedCount.get()
+                        , tookAmt
+                );
+            }
+            if (this.receivedCount.get() != tookAmt) {
+                if (log.isDebugEnabled()) {
+                    log.debug("[ANALYSIS END] [{}] times: {}, received: {}, took: {}"
+                            , this.brokerId
+                            , this.receivedLastTimes.get()
+                            , this.receivedCount.get()
+                            , tookAmt
+                    );
+                }
+            }
+            synchronized (this.state) {
+                if (this.state.value < OrderState.COMPLETED.value) {
+                    this.send(() -> getReqBuilder().setCancelRequest(this.cancelReq).build());
+                    this.state = OrderState.COMPLETED;
+                }
+            }
+        }
+
+        private ScanStreamBatchReq.Builder getReqBuilder() {
+            return this.reqBuilder.clearQueryRequest();
+        }
+
+        private void send(Supplier<ScanStreamBatchReq> supplier) {
+            this.senderLock.lock();
+            try {
+                if (!this.serverFinished.get()) {
+                    this.requestObserver.onNext(supplier.get());
+                }
+                Thread.yield();
+            } finally {
+                this.senderLock.unlock();
+            }
+        }
+
+        private class OrderAgent implements StreamObserver<KvPageRes> {
+
+            private final AtomicInteger count = new AtomicInteger(0);
+            private final AtomicBoolean over = new AtomicBoolean(false);
+            private final String agentId;
+
+            OrderAgent(String agentId) {
+                this.agentId = agentId;
+            }
+
+            @Override
+            public void onNext(KvPageRes value) {
+                if (log.isDebugEnabled()) {
+                    log.debug("Scan [ {} ] [ {} ] times, received: [ {} ]"
+                            , nodeSession.getStoreNode().getAddress(), value.getTimes(),
+                              value.getDataList().size());
+                }
+
+                serverFinished.set(value.getOver());
+
+                List<Kv> buffer = value.getDataList();
+                count.addAndGet(buffer.size());
+                if (log.isDebugEnabled()) {
+                    if (value.getOver()) {
+                        log.debug("[ANALYSIS OVER] [{}] count: {}", agentId, count);
+                    }
+                }
+                keeper.receive(buffer, value.getTimes());
+                this.over.set(value.getOver());
+                this.checkOver(value.getTimes());
+            }
+
+            private void checkOver(int times) {
+                if (this.over.get()) {
+                    requestObserver.onCompleted();
+                    keeper.done(times);
+                }
+            }
+
+            @Override
+            public void onError(Throwable t) {
+                log.error("received server onError event, Throwable:", t);
+                keeper.shout(t);
+            }
+
+            @Override
+            public void onCompleted() {
+                if (log.isDebugEnabled()) {
+                    log.debug("received server completed event.");
+                }
+                serverFinished.set(true);
+
+            }
+
+        }
+
+        /*** Inventory Keeper ***/
+        private class OrderKeeper {
+
+            private final BlockingQueue<Supplier<List<Kv>>> queue = new LinkedBlockingQueue<>();
+            private final ReentrantLock pickUpLock = new ReentrantLock();
+            private final AtomicBoolean done = new AtomicBoolean();
+            private final AtomicBoolean stop = new AtomicBoolean();
+            private int timesOfOver;
+            private int lastTimes;
+            private Throwable serverErr;
+
+            void receive(List<Kv> data, int times) {
+                receivedCount.addAndGet(data.size());
+                this.queue.offer(() -> data);
+                receipt(times);
+
+                this.lastTimes = times;
+            }
+
+            private List<Kv> pickUp() {
+                Supplier<List<Kv>> res;
+
+                pickUpLock.lock();
+                try {
+
+                    if (this.done.get()) {
+                        if (this.stop.get()) {
+                            log.warn("Invoking pickUp method after OrderKeeper has been closed.");
+                        }
+                        res = this.queue.poll();
+                        if (res == null) {
+                            res = () -> null;
+                        }
+                    } else {
+                        res = this.queue.poll(HAVE_NEXT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
+                        if (res == null) {
+                            if (this.done.get()) {
+                                res = () -> null;
+                            } else {
+                                throw HgStoreClientException.of(
+                                        "Timeout, max time: " + HAVE_NEXT_TIMEOUT_SECONDS +
+                                        " seconds" +
+                                        ", isOver: " + this.done.get() +
+                                        ", isStop: " + this.stop.get() +
+                                        ", last-times: " + this.lastTimes +
+                                        ", over-times: " + this.timesOfOver);
+                            }
+                        }
+
+                    }
+                } catch (InterruptedException e) {
+                    log.error(
+                            "Failed to receive List<Kv> from queue because of interruption of " +
+                            "current thread ["
+                            + Thread.currentThread().getName() + "]");
+
+                    Thread.currentThread().interrupt();
+
+                    throw HgStoreClientException.of(
+                            "Failed to receive List<Kv> from queue, cause by:", e);
+                } finally {
+                    pickUpLock.unlock();
+                }
+
+                checkServerErr();
+                return res.get();
+
+            }
+
+            void done(int times) {
+                this.timesOfOver = times;
+                this.done.set(true);
+                this.queue.offer(() -> null);
+            }
+
+            void shout(Throwable t) {
+                this.serverErr = t;
+                log.error("Failed to receive from server", t);
+                this.queue.offer(() -> null);
+            }
+
+            private void checkServerErr() {
+                if (this.serverErr != null) {
+                    throw HgStoreClientException.of(this.serverErr);
+                }
+            }
+        }
+
+    }
+
+    /* iterator */
+    private static class OrderConsumer implements KvCloseableIterator<Kv>, HgPageSize {
+
+        private final OrderBroker broker;
+        private final String consumerId;
+        private Iterator<Kv> dataIterator;
+        private long tookCount = 0;
+
+        OrderConsumer(OrderBroker broker) {
+            this.broker = broker;
+            consumerId = broker.brokerId;
+        }
+
+        private Iterator<Kv> getIterator() {
+            List<Kv> list = this.broker.oneMore();
+
+            if (log.isDebugEnabled()) {
+                if (list != null && list.isEmpty()) {
+                    log.debug("[ANALYSIS EMPTY] [{}] , tookCount: {}", consumerId, tookCount);
+                }
+            }
+
+            if (list == null || list.isEmpty()) {
+                return null;
+            } else {
+                return list.iterator();
+            }
+        }
+
+        @Override
+        public void close() {
+            this.broker.finish(this.tookCount);
+        }
+
+        @Override
+        public long getPageSize() {
+            return PAGE_SIZE;
+        }
+
+        @Override
+        public boolean hasNext() {
+
+            if (this.dataIterator == null) {
+                this.dataIterator = this.getIterator();
+            } else {
+                if (this.dataIterator.hasNext()) {
+                    return true;
+                } else {
+                    this.dataIterator = this.getIterator();
+                }
+            }
+
+            if (this.dataIterator == null) {
+                if (log.isDebugEnabled()) {
+                    log.debug("[ANALYSIS NULL -> FALSE] [{}] , tookCount: {}", consumerId,
+                              tookCount);
+                }
+                return false;
+            } else {
+                if (log.isDebugEnabled()) {
+                    if (!this.dataIterator.hasNext()) {
+                        log.debug("[ANALYSIS hasNext -> FALSE] [{}] , tookCount: {}", consumerId,
+                                  tookCount);
+                    }
+                }
+                return this.dataIterator.hasNext();
+            }
+
+        }
+
+        @Override
+        public Kv next() {
+            if (this.dataIterator == null) {
+                if (!this.hasNext()) {
+                    throw new NoSuchElementException();
+                }
+            }
+
+            if (log.isDebugEnabled()) {
+                tookCount++;
+                if (tookCount % 10000 == 0) {
+                    log.debug("[ANALYSIS NEXT] [{}] , tookCount: {}", consumerId, tookCount);
+                }
+            }
+            return this.dataIterator.next();
+        }
+
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScannerMerger.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScannerMerger.java
new file mode 100644
index 0000000..4f666c9
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScannerMerger.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvOrderedIterator;
+import org.apache.hugegraph.store.HgPageSize;
+import org.apache.hugegraph.store.client.util.PropertyUtil;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Merges the results of batch queries, working in blocking-queue mode.
+ * Splits the request task into pieces and creates multiple request queues.
+ */
+@Slf4j
+public class KvBatchScannerMerger implements KvCloseableIterator<HgKvIterator<HgKvEntry>>,
+                                             HgPageSize {
+
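+    // Maximum number of 1-second polls to wait for data before giving up
+    // (configurable via the "net.kv.scanner.wait.timeout" property)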
+    static int maxWaitCount = PropertyUtil.getInt("net.kv.scanner.wait.timeout", 60);
+    protected final BlockingQueue<Supplier<HgKvIterator<HgKvEntry>>> queue =
+            new LinkedBlockingQueue<>();
+    private final KvBatchScanner.TaskSplitter taskSplitter;
+    private final List<KvBatchScanner> scanners = new CopyOnWriteArrayList<>();
+    private Supplier<HgKvIterator<HgKvEntry>> current = null;
+
+    public KvBatchScannerMerger(KvBatchScanner.TaskSplitter splitter) {
+        this.taskSplitter = splitter;
+        splitter.setNotifier(this);
+    }
+
+    public void startTask() {
+        taskSplitter.splitTask();
+    }
+
+    public void dataArrived(KvBatchScanner scanner, Supplier<HgKvIterator<HgKvEntry>> supplier)
+            throws InterruptedException {
+        queue.put(supplier);
+    }
+
+    @Override
+    public boolean hasNext() {
+        int waitTime = 0;
+        while (current == null) {
+            try {
+                // The queue still has data, active scanners remain, or the task has not been fully dispatched
+                if (queue.size() != 0 || scanners.size() > 0 || !taskSplitter.isFinished()) {
+                    current = queue.poll(1, TimeUnit.SECONDS);  // Periodically check whether the client has been closed
+                } else {
+                    break;
+                }
+                if (current == null) {
+                    // Poll timed out: notify the scanners and retry
+                    sendTimeout();
+                    if (++waitTime > maxWaitCount) {
+                        log.error(
+                                "KvBatchScanner timed out waiting for data after {} retries, " +
+                                "active scanners: {}, task finished: {}",
+                                waitTime, scanners.size(), taskSplitter.isFinished());
+                        break;
+                    }
+                }
+            } catch (InterruptedException e) {
+                log.error("hasNext interrupted", e);
+                throw new RuntimeException(e.getMessage(), e);
+            }
+        }
+        return current != null && current != KvBatchScanner.NO_DATA;
+    }
+
+    @Override
+    public HgKvIterator<HgKvEntry> next() {
+        HgKvIterator<HgKvEntry> iterator = null;
+        if (current != null) {
+            iterator = current.get();
+        }
+        current = null;
+        return iterator;
+    }
+
+    @Override
+    public void close() {
+        taskSplitter.close();
+        scanners.forEach(c -> c.close());
+    }
+
+    private void sendTimeout() {
+        scanners.forEach(v -> {
+            v.sendResponse();
+        });
+    }
+
+    @Override
+    public long getPageSize() {
+        return 0;
+    }
+
+    public void registerScanner(KvBatchScanner closeable) {
+        this.scanners.add(closeable);
+    }
+
+    /**
+     * A return value < 0 indicates that the whole task is finished.
+     *
+     * @param closeable the scanner being unregistered
+     * @return the number of remaining scanners, or -1 when the task is finished and none remain
+     */
+    public int unregisterScanner(KvBatchScanner closeable) {
+        this.scanners.remove(closeable);
+        try {
+            taskSplitter.splitTask();
+        } catch (Exception e) {
+            log.error("Failed to split task after unregistering scanner", e);
+        }
+        return taskSplitter.isFinished() && this.scanners.size() == 0 ?
+               -1 : this.scanners.size();
+    }
+
+    public int getScannerCount() {
+        return this.scanners.size();
+    }
+
+    /**
+     * Assembles the multiple ordered iterators of a single scanner into one iterator.
+     */
+    static class ScannerDataQueue {
+
+        private BlockingQueue<Supplier<HgKvIterator<HgKvEntry>>> queue;
+        private HgKvOrderedIterator<HgKvEntry> iterator = null;
+        private int currentSN = 0;
+        private HgKvEntry entry;
+
+        public ScannerDataQueue() {
+            queue = new LinkedBlockingQueue<>();
+        }
+
+        public int sn() {
+            return currentSN;
+        }
+
+        public void add(Supplier<HgKvIterator<HgKvEntry>> supplier) {
+            if (queue != null) {
+                queue.add(supplier);
+            }
+        }
+
+        /**
+         * Returns whether the iterator has more data; if no data has arrived yet, waits for it.
+         *
+         * @return true if a next entry is available, false if the queue is exhausted or the wait timed out
+         */
+        public boolean hasNext() {
+            int waitTime = 0;
+            while (entry == null && queue != null) {
+                try {
+                    Supplier<HgKvIterator<HgKvEntry>> current;
+                    current = queue.poll(1, TimeUnit.SECONDS);  // Periodically check whether the client has been closed
+                    if (current == null) {
+                        if (++waitTime > maxWaitCount) {
+                            break;
+                        }
+                    } else if (current == KvBatchScanner.NO_DATA) {
+                        queue = null;
+                        break;
+                    } else {
+                        iterator = (HgKvOrderedIterator<HgKvEntry>) current.get();
+                        if (iterator != null && iterator.hasNext()) {
+                            moveNext();
+                        } else {
+                            iterator = null;
+                        }
+                    }
+                } catch (InterruptedException e) {
+                    log.error("hasNext interrupted", e);
+                    throw new RuntimeException(e.getMessage(), e);
+                }
+            }
+            return entry != null;
+        }
+
+        public HgKvEntry next() {
+            HgKvEntry current = entry;
+            moveNext();
+            return current;
+        }
+
+        private void moveNext() {
+            if (iterator.hasNext()) {
+                entry = iterator.next();
+                currentSN = (int) iterator.getSequence();
+            } else {
+                entry = null;
+                iterator = null;
+            }
+        }
+    }
+
+    /**
+     * Merge-sorts the results returned by multiple scanners.
+     */
+    static class SortedScannerMerger extends KvBatchScannerMerger {
+
+        // Each stream has its own receiving queue
+        private final Map<KvBatchScanner, ScannerDataQueue> scannerQueues =
+                new ConcurrentHashMap<>();
+
+        public SortedScannerMerger(KvBatchScanner.TaskSplitter splitter) {
+            super(splitter);
+            queue.add(() -> {
+                // Merge-sort the results returned by the stores
+                return new HgKvIterator<>() {
+                    private ScannerDataQueue iterator;
+                    private int currentSN = 0;
+                    private HgKvEntry entry;
+
+                    @Override
+                    public byte[] key() {
+                        return entry.key();
+                    }
+
+                    @Override
+                    public byte[] value() {
+                        return entry.value();
+                    }
+
+                    @Override
+                    public void close() {
+
+                    }
+
+                    @Override
+                    public byte[] position() {
+                        return new byte[0];
+                    }
+
+                    @Override
+                    public void seek(byte[] position) {
+                        throw new RuntimeException("not implemented");
+                    }
+
+                    @Override
+                    public boolean hasNext() {
+                        // Re-select a queue when the current one is exhausted or its sequence
+                        // number has changed, so entries are consumed in order across scanners
+                        if (iterator == null || !iterator.hasNext() || currentSN != iterator.sn()) {
+                            iterator = selectIterator();
+                        }
+
+                        if (iterator != null) {
+                            currentSN = iterator.sn();
+                        }
+                        return iterator != null;
+                    }
+
+                    @Override
+                    public HgKvEntry next() {
+                        entry = iterator.next();
+                        return entry;
+                    }
+                };
+            });
+        }
+
+        /**
+         * Picks, among all scanner queues, the iterator whose next batch has the smallest
+         * sequence number (sn). If a scanner has no data yet, waits for its data to arrive.
+         *
+         * @return the selected queue, or null when all scanner queues are exhausted
+         */
+        private ScannerDataQueue selectIterator() {
+            int sn = Integer.MAX_VALUE;
+            ScannerDataQueue current = null;
+            while (current == null && !scannerQueues.isEmpty()) {
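+                // Drop exhausted queues and pick the queue whose next batch carries the
+                // smallest sequence number, so the merged output stays ordered across scanners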
+                Iterator<KvBatchScanner> itr = scannerQueues.keySet().iterator();
+                while (itr.hasNext()) {
+                    KvBatchScanner key = itr.next();
+                    ScannerDataQueue kvItr = scannerQueues.get(key);
+                    if (!kvItr.hasNext()) {
+                        scannerQueues.remove(key);
+                        continue;
+                    }
+                    if (kvItr.sn() <= sn) {
+                        sn = kvItr.sn();
+                        current = kvItr;
+                    }
+                }
+            }
+            return current;
+        }
+
+        @Override
+        public void registerScanner(KvBatchScanner scanner) {
+            super.registerScanner(scanner);
+            scannerQueues.putIfAbsent(scanner, new ScannerDataQueue());
+        }
+
+        @Override
+        public int unregisterScanner(KvBatchScanner scanner) {
+            dataArrived(scanner, KvBatchScanner.NO_DATA);
+            return super.unregisterScanner(scanner);
+        }
+
+        @Override
+        public void dataArrived(KvBatchScanner scanner,
+                                Supplier<HgKvIterator<HgKvEntry>> supplier) {
+            scannerQueues.putIfAbsent(scanner, new ScannerDataQueue());
+            scannerQueues.get(scanner).add(supplier);
+        }
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchUtil.java
new file mode 100644
index 0000000..7f79aec
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchUtil.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.List;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.grpc.common.Header;
+import org.apache.hugegraph.store.grpc.common.ScanMethod;
+import org.apache.hugegraph.store.grpc.stream.ScanCondition;
+import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;
+
+import com.google.protobuf.ByteString;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/04/23
+ */
+@Slf4j
+@NotThreadSafe
+class KvBatchUtil {
+
+    static final byte[] EMPTY_POSITION = HgStoreClientConst.EMPTY_BYTES;
+
+    static ScanStreamBatchReq.Builder getRequestBuilder(HgStoreNodeSession nodeSession) {
+        return ScanStreamBatchReq.newBuilder().setHeader(getHeader(nodeSession));
+    }
+
+    static ScanQueryRequest createQueryReq(HgScanQuery scanQuery, long pageSize) {
+
+        ScanQueryRequest.Builder qb = ScanQueryRequest.newBuilder();
+        ScanCondition.Builder cb = ScanCondition.newBuilder();
+
+        qb.setLimit(getLimit(scanQuery.getLimit()));
+        qb.setPerKeyLimit(getLimit(scanQuery.getPerKeyLimit()));
+        qb.setPerKeyMax(getLimit(scanQuery.getPerKeyMax()));
+
+        switch (scanQuery.getScanMethod()) {
+            case ALL:
+                qb.setMethod(ScanMethod.ALL);
+                break;
+            case PREFIX:
+                qb.setMethod(ScanMethod.PREFIX);
+                addPrefixCondition(scanQuery, qb, cb);
+                break;
+            case RANGE:
+                qb.setMethod(ScanMethod.RANGE);
+                addRangeCondition(scanQuery, qb, cb);
+                break;
+            default:
+                throw new RuntimeException("Unsupported ScanType: " + scanQuery.getScanMethod());
+        }
+
+        qb.setTable(scanQuery.getTable());
+        qb.setPageSize(pageSize);
+        qb.setQuery(toBs(scanQuery.getQuery()));
+        qb.setScanType(scanQuery.getScanType());
+        qb.setOrderType(scanQuery.getOrderType());
+        qb.setSkipDegree(scanQuery.getSkipDegree());
+
+        return qb.build();
+    }
+
+    static long getLimit(long limit) {
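+        // A limit <= NO_LIMIT means "no limit" and is mapped to Integer.MAX_VALUE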
+        return limit <= HgStoreClientConst.NO_LIMIT ? Integer.MAX_VALUE : limit;
+    }
+
+    static Header getHeader(HgStoreNodeSession nodeSession) {
+        return Header.newBuilder().setGraph(nodeSession.getGraphName()).build();
+    }
+
+    static void addPrefixCondition(HgScanQuery scanQuery, ScanQueryRequest.Builder qb,
+                                   ScanCondition.Builder cb) {
+        List<HgOwnerKey> prefixList = scanQuery.getPrefixList();
+
+        if (prefixList == null || prefixList.isEmpty()) {
+            throw new RuntimeException(
+                    "The prefix-list of ScanQuery must not be null or empty in ScanMethod.PREFIX mode.");
+        }
+
+        prefixList.forEach((e) -> {
+            qb.addCondition(cb.clear()
+                              .setPrefix(toBs(e.getKey()))
+                              .setCode(e.getKeyCode())
+                              .setSerialNo(e.getSerialNo())
+                              .build()
+            );
+        });
+
+    }
+
+    static void addRangeCondition(HgScanQuery scanQuery, ScanQueryRequest.Builder qb,
+                                  ScanCondition.Builder cb) {
+        List<HgOwnerKey> startList = scanQuery.getStartList();
+        List<HgOwnerKey> endList = scanQuery.getEndList();
+
+        if (startList == null || startList.isEmpty()) {
+            throw new RuntimeException(
+                    "The start-list of ScanQuery must not be null or empty in ScanMethod.RANGE mode.");
+        }
+
+        if (endList == null || endList.isEmpty()) {
+            throw new RuntimeException(
+                    "The end-list of ScanQuery must not be null or empty in ScanMethod.RANGE mode.");
+        }
+
+        if (startList.size() != endList.size()) {
+            throw new RuntimeException("The size of the start-list does not equal that of the end-list.");
+        }
+
+        for (int i = 0, s = startList.size(); i < s; i++) {
+            HgOwnerKey start = startList.get(i);
+            HgOwnerKey end = endList.get(i);
+            qb.addCondition(cb.clear().setCode(start.getKeyCode())
+                              .setStart(toBs(start.getKey()))
+                              .setEnd(toBs(end.getKey()))
+                              .setSerialNo(start.getSerialNo())
+                              .build()
+            );
+        }
+
+    }
+
+    static HgOwnerKey toOk(HgOwnerKey key) {
+        return key == null ? HgStoreClientConst.EMPTY_OWNER_KEY : key;
+    }
+
+    static ByteString toBs(byte[] bytes) {
+        return ByteString.copyFrom((bytes != null) ? bytes : HgStoreClientConst.EMPTY_BYTES);
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvCloseableIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvCloseableIterator.java
new file mode 100644
index 0000000..d23b3a4
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvCloseableIterator.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.io.Closeable;
+import java.util.Iterator;
+
+/**
+ * 2022/3/16
+ */
+public interface KvCloseableIterator<T> extends Iterator<T>, Closeable {
+
+    @Override
+    void close();
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvListIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvListIterator.java
new file mode 100644
index 0000000..ef807c5
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvListIterator.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * 2022/4/6
+ */
+class KvListIterator<T> implements KvCloseableIterator<T> {
+
+    private final Iterator<T> iterator;
+
+    KvListIterator(List<T> list) {
+        this.iterator = list.iterator();
+    }
+
+    @Override
+    public void close() {
+        /*Nothing to do.*/
+    }
+
+    @Override
+    public boolean hasNext() {
+        return this.iterator.hasNext();
+    }
+
+    @Override
+    public T next() {
+        return this.iterator.next();
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvOneShotScanner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvOneShotScanner.java
new file mode 100644
index 0000000..7375a05
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvOneShotScanner.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Iterator;
+import java.util.List;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgKvStore;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgPageSize;
+import org.apache.hugegraph.store.HgSeekAble;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgStoreClientConfig;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+import org.apache.hugegraph.store.grpc.common.Header;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.common.ScanMethod;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamBlockingStub;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+
+import com.google.protobuf.ByteString;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/12/1
+ */
+@Slf4j
+@NotThreadSafe
+class KvOneShotScanner implements KvCloseableIterator<Kv>, HgPageSize, HgSeekAble {
+
+    private static final HgStoreClientConfig storeClientConfig = HgStoreClientConfig.of();
+    private final HgStoreNodeSession session;
+    private final HgStoreStreamBlockingStub stub;
+    private final ScanStreamReq.Builder reqBuilder = ScanStreamReq.newBuilder();
+    private final String table;
+    private final HgOwnerKey startKey;
+    private final HgOwnerKey endKey;
+    private final HgOwnerKey prefix;
+    private final ScanMethod scanMethod;
+    private final long limit;
+    private final int partition;
+    private final int scanType;
+    private final byte[] query;
+    private final int pageSize;
+    private ScanStreamReq req;
+    private Iterator<Kv> iterator;
+    private List<Kv> list = null;
+    private boolean in = true;
+    private byte[] nodePosition = HgStoreClientConst.EMPTY_BYTES;
+
+    private KvOneShotScanner(ScanMethod scanMethod, HgStoreNodeSession session,
+                             HgStoreStreamBlockingStub stub,
+                             String table, HgOwnerKey prefix, HgOwnerKey startKey,
+                             HgOwnerKey endKey, long limit,
+                             int partition, int scanType, byte[] query) {
+        this.scanMethod = scanMethod;
+        this.session = session;
+        this.stub = stub;
+        this.table = table;
+        this.startKey = toOk(startKey);
+        this.endKey = toOk(endKey);
+        this.prefix = toOk(prefix);
+        this.partition = partition;
+        this.scanType = scanType;
+        this.query = query != null ? query : HgStoreClientConst.EMPTY_BYTES;
+        this.limit = limit <= HgStoreClientConst.NO_LIMIT ? Integer.MAX_VALUE :
+                     limit; // <=0 means no limit
+        this.pageSize = storeClientConfig.getNetKvScannerPageSize();
+
+    }
+
+    public static KvCloseableIterator<Kv> scanAll(HgStoreNodeSession session,
+                                                  HgStoreStreamBlockingStub stub,
+                                                  String table, long limit, byte[] query) {
+        return new KvOneShotScanner(ScanMethod.ALL, session, stub, table, null, null, null, limit,
+                                    -1, HgKvStore.SCAN_ANY,
+                                    query);
+    }
+
+    public static KvCloseableIterator<Kv> scanPrefix(HgStoreNodeSession session,
+                                                     HgStoreStreamBlockingStub stub,
+                                                     String table, HgOwnerKey prefix, long limit,
+                                                     byte[] query) {
+        return new KvOneShotScanner(ScanMethod.PREFIX, session, stub, table, prefix, null, null,
+                                    limit,
+                                    prefix.getKeyCode(), HgKvStore.SCAN_PREFIX_BEGIN, query);
+    }
+
+    public static KvCloseableIterator<Kv> scanRange(HgStoreNodeSession nodeSession,
+                                                    HgStoreStreamBlockingStub stub,
+                                                    String table, HgOwnerKey startKey,
+                                                    HgOwnerKey endKey, long limit,
+                                                    int scanType, byte[] query) {
+        return new KvOneShotScanner(ScanMethod.RANGE, nodeSession, stub, table, null, startKey,
+                                    endKey, limit,
+                                    startKey.getKeyCode(), scanType, query);
+    }
+
+    static HgOwnerKey toOk(HgOwnerKey key) {
+        return key == null ? HgStoreClientConst.EMPTY_OWNER_KEY : key;
+    }
+
+    static ByteString toBs(byte[] bytes) {
+        return ByteString.copyFrom((bytes != null) ? bytes : HgStoreClientConst.EMPTY_BYTES);
+    }
+
+    private Header getHeader(HgStoreNodeSession nodeSession) {
+        return Header.newBuilder().setGraph(nodeSession.getGraphName()).build();
+    }
+
+    private void createReq() {
+        this.req = this.reqBuilder
+                .setHeader(this.getHeader(this.session))
+                .setMethod(this.scanMethod)
+                .setTable(this.table)
+                .setStart(toBs(this.startKey.getKey()))
+                .setEnd(toBs(this.endKey.getKey()))
+                .setLimit(this.limit)
+                .setPrefix(toBs(this.prefix.getKey()))
+                .setCode(this.partition)
+                .setScanType(this.scanType)
+                .setQuery(toBs(this.query))
+                .setPageSize(pageSize)
+                .setPosition(toBs(this.nodePosition))
+                .build();
+    }
+
+    private void init() {
+
+        if (this.iterator == null) {
+            this.createReq();
+            this.list = this.stub.scanOneShot(this.req).getDataList();
+            this.iterator = this.list.iterator();
+        }
+
+    }
+
+    @Override
+    public boolean hasNext() {
+        if (!this.in) {
+            return false;
+        }
+        if (this.iterator == null) {
+            this.init();
+        }
+        return this.iterator.hasNext();
+    }
+
+    @Override
+    public Kv next() {
+        if (this.iterator == null) {
+            this.init();
+        }
+        return this.iterator.next();
+    }
+
+    @Override
+    public long getPageSize() {
+        return this.limit;
+    }
+
+    @Override
+    public boolean isPageEmpty() {
+        return !this.iterator.hasNext();
+    }
+
+    @Override
+    public byte[] position() {
+        return HgStoreClientUtil.toBytes(this.session.getStoreNode().getNodeId().longValue());
+    }
+
+    @Override
+    public void seek(byte[] position) {
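+        // The position is expected to be an 8-byte store-node id followed by that node's local
+        // scan position; this node is skipped when its id is smaller than the recorded one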
+        if (position == null || position.length < Long.BYTES) {
+            return;
+        }
+        byte[] nodeIdBytes = new byte[Long.BYTES];
+        System.arraycopy(position, 0, nodeIdBytes, 0, Long.BYTES);
+        long nodeId = this.session.getStoreNode().getNodeId().longValue();
+        long pId = HgStoreClientUtil.toLong(nodeIdBytes);
+        this.in = nodeId >= pId;
+        if (this.in && nodeId == pId) {
+            this.nodePosition = new byte[position.length - Long.BYTES];
+            System.arraycopy(position, Long.BYTES, this.nodePosition, 0, this.nodePosition.length);
+        } else {
+            this.nodePosition = HgStoreClientConst.EMPTY_BYTES;
+        }
+    }
+
+    @Override
+    public void close() {
+        // TODO: implement
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvPageScanner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvPageScanner.java
new file mode 100644
index 0000000..2879a50
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvPageScanner.java
@@ -0,0 +1,311 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgKvStore;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgPageSize;
+import org.apache.hugegraph.store.HgSeekAble;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgBufferProxy;
+import org.apache.hugegraph.store.client.util.HgStoreClientConfig;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+import org.apache.hugegraph.store.client.util.MetricX;
+import org.apache.hugegraph.store.grpc.common.Header;
+import org.apache.hugegraph.store.grpc.common.Kv;
+import org.apache.hugegraph.store.grpc.common.ScanMethod;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamStub;
+import org.apache.hugegraph.store.grpc.stream.KvPageRes;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+import org.apache.hugegraph.store.grpc.stream.SelectParam;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/10/21
+ *
+ * @version 0.6.1 lynn.bond@hotamil.com on 2022/04/05
+ */
+@Slf4j
+@NotThreadSafe
+class KvPageScanner implements KvCloseableIterator<Kv>, HgPageSize, HgSeekAble {
+
+    private static final HgStoreClientConfig clientConfig = HgStoreClientConfig.of();
+    private static final int nextTimeout = clientConfig.getNetKvScannerHaveNextTimeout();
+    private final HgStoreNodeSession session;
+    private final HgStoreStreamStub stub;
+    private final AtomicBoolean completed = new AtomicBoolean(false);
+    private final SelectParam.Builder selectBuilder = SelectParam.newBuilder();
+    private final BlockingQueue<ScanStreamReq> reqQueue = new LinkedBlockingQueue<>();
+    private int pageSize = clientConfig.getNetKvScannerPageSize();
+    private HgBufferProxy<List<Kv>> proxy;
+    private Iterator<Kv> iterator;
+    private StreamObserver<ScanStreamReq> observer;
+    private ScanStreamReq.Builder reqBuilder = ScanStreamReq.newBuilder();
+    private boolean in = true;
+    private byte[] nodePosition = HgStoreClientConst.EMPTY_BYTES;
+
+    private KvPageScanner(ScanMethod scanMethod, HgStoreNodeSession session, HgStoreStreamStub stub,
+                          String table,
+                          HgOwnerKey prefix, HgOwnerKey startKey, HgOwnerKey endKey, long limit,
+                          int partition,
+                          int scanType, byte[] query) {
+        this.session = session;
+        this.stub = stub;
+        this.pageSize = clientConfig.getNetKvScannerPageSize();
+        this.reqBuilder.setHeader(this.getHeader(this.session))
+                       .setMethod(scanMethod)
+                       .setTable(table)
+                       .setStart(toBs(toOk(startKey).getKey()))
+                       .setEnd(toBs(toOk(endKey).getKey()))
+                       .setLimit(limit <= HgStoreClientConst.NO_LIMIT ? Integer.MAX_VALUE : limit)
+                       .setPrefix(toBs(toOk(prefix).getKey()))
+                       .setCode(partition)
+                       .setScanType(scanType)
+                       .setQuery(toBs(query != null ? query : HgStoreClientConst.EMPTY_BYTES))
+                       .setPageSize(pageSize)
+                       .setPosition(toBs(this.nodePosition));
+        this.init();
+    }
+
+    public KvPageScanner(HgStoreNodeSession session, HgStoreStreamStub stub,
+                         ScanStreamReq.Builder reqBuilder) {
+        this.session = session;
+        this.stub = stub;
+        reqBuilder.setPageSize(pageSize);
+        reqBuilder.setPosition(toBs(this.nodePosition));
+        this.reqBuilder = reqBuilder;
+        this.init();
+    }
+
+    public static KvCloseableIterator<Kv> scanAll(HgStoreNodeSession nodeSession,
+                                                  HgStoreStreamStub stub, String table,
+                                                  long limit, byte[] query) {
+        return new KvPageScanner(ScanMethod.ALL, nodeSession, stub, table, null, null, null, limit,
+                                 -1, HgKvStore.SCAN_ANY, query);
+    }
+
+    public static KvCloseableIterator<Kv> scanPrefix(HgStoreNodeSession nodeSession,
+                                                     HgStoreStreamStub stub,
+                                                     String table, HgOwnerKey prefix, long limit,
+                                                     byte[] query) {
+        return new KvPageScanner(ScanMethod.PREFIX, nodeSession, stub, table, prefix, null, null,
+                                 limit,
+                                 prefix.getKeyCode(), HgKvStore.SCAN_PREFIX_BEGIN, query);
+    }
+
+    public static KvCloseableIterator<Kv> scanRange(HgStoreNodeSession nodeSession,
+                                                    HgStoreStreamStub stub,
+                                                    String table, HgOwnerKey startKey,
+                                                    HgOwnerKey endKey, long limit,
+                                                    int scanType, byte[] query) {
+        return new KvPageScanner(ScanMethod.RANGE, nodeSession, stub, table, null, startKey, endKey,
+                                 limit,
+                                 startKey.getKeyCode(), scanType, query);
+    }
+
+    static HgOwnerKey toOk(HgOwnerKey key) {
+        return key == null ? HgStoreClientConst.EMPTY_OWNER_KEY : key;
+    }
+
+    static ByteString toBs(byte[] bytes) {
+        return ByteString.copyFrom((bytes != null) ? bytes : HgStoreClientConst.EMPTY_BYTES);
+    }
+
+    private ScanStreamReq createScanReq() {
+        return this.reqBuilder.setPosition(toBs(this.nodePosition)).build();
+    }
+
+    private ScanStreamReq createStopReq() {
+        return this.reqBuilder.setHeader(this.getHeader(this.session)).setCloseFlag(1).build();
+    }
+
+    private void init() {
+        this.proxy = HgBufferProxy.of(() -> this.serverScan());
+        this.observer = this.stub.scan(new ServeObserverImpl());
+
+    }
+
+    /*** Server Event End ***/
+
+    private void serverScan() {
+        if (this.completed.get()) {
+            this.proxy.close();
+            return;
+        }
+        if (this.proxy.isClosed()) {
+            return;
+        }
+        this.send(this.createScanReq());
+    }
+
+    private void stopServer() {
+        this.send(this.createStopReq());
+    }
+
+    private void send(ScanStreamReq req) {
+        if (!this.completed.get()) {
+            try {
+                this.observer.onNext(req);
+            } catch (IllegalStateException | IllegalArgumentException e) {
+                // The request stream may already be completed or cancelled; ignore and let the next call retry.
+            }
+        }
+    }
+
+    private void clientError(String msg) {
+        this.observer.onError(GrpcUtil.toErr(msg));
+    }
+
+    /*** Iterator ***/
+    @Override
+    public boolean hasNext() {
+        if (!this.in) {
+            return false;
+        }
+        // QUESTION: After `this.iterator.hasNext()` evaluates to false,
+        //           no further attempt should be made to reconstruct the iterator.
+        if (this.iterator != null && this.iterator.hasNext()) {
+            return true;
+        }
+        long start = 0;
+        boolean debugEnabled = log.isDebugEnabled();
+        if (debugEnabled) {
+            start = System.nanoTime();
+        }
+        List<Kv> data = this.proxy.receive(nextTimeout, (sec) -> {
+            String msg = "failed to receive data from net scanning, because of timeout [ " + sec +
+                         " ] sec.";
+            log.error(msg);
+            this.clientError(msg);
+            throw new RuntimeException(msg);
+        });
+        if (debugEnabled) {
+            MetricX.plusIteratorWait(System.nanoTime() - start);
+        }
+        if (data != null) {
+            this.iterator = data.iterator();
+        } else {
+            this.iterator = Collections.emptyIterator();
+        }
+        return this.iterator.hasNext();
+    }
+
+    @Override
+    public Kv next() {
+        if (this.iterator == null && !this.hasNext()) {
+            throw new NoSuchElementException();
+        }
+        return this.iterator.next();
+    }
+
+    @Override
+    public long getPageSize() {
+        return this.pageSize;
+    }
+
+    @Override
+    public boolean isPageEmpty() {
+        return !this.iterator.hasNext();
+    }
+
+    @Override
+    public byte[] position() {
+        return HgStoreClientUtil.toBytes(this.session.getStoreNode().getNodeId().longValue());
+    }
+
+    @Override
+    public void seek(byte[] position) {
+        if (position == null || position.length < Long.BYTES) {
+            return;
+        }
+        byte[] nodeIdBytes = new byte[Long.BYTES];
+        System.arraycopy(position, 0, nodeIdBytes, 0, Long.BYTES);
+        long nodeId = this.session.getStoreNode().getNodeId().longValue();
+        long pId = HgStoreClientUtil.toLong(nodeIdBytes);
+        this.in = nodeId >= pId;
+        if (this.in && nodeId == pId) {
+            this.nodePosition = new byte[position.length - Long.BYTES];
+            System.arraycopy(position, Long.BYTES, this.nodePosition, 0, this.nodePosition.length);
+        } else {
+            this.nodePosition = HgStoreClientConst.EMPTY_BYTES;
+        }
+    }
+
+    @Override
+    public void close() {
+        this.stopServer();
+    }
+
+    /*** commons ***/
+    private Header getHeader(HgStoreNodeSession nodeSession) {
+        return Header.newBuilder().setGraph(nodeSession.getGraphName()).build();
+    }
+
+    /*** Server event Start ***/
+    private class ServeObserverImpl implements StreamObserver<KvPageRes> {
+
+        @Override
+        public void onNext(KvPageRes value) {
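+            // A page marked as `over` is the last one: complete the request stream, still
+            // forward its data to the proxy, then close the proxy to end the iteration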
+            if (value.getOver()) {
+                completed.set(true);
+                observer.onCompleted();
+            }
+            proxy.send(value.getDataList());
+            if (completed.get()) {
+                proxy.close();
+            }
+        }
+
+        @Override
+        public void onError(Throwable t) {
+            completed.set(true);
+            try {
+                observer.onCompleted();
+            } catch (Exception e) {
+                log.warn("failed to invoke observer.onCompleted(), reason: {}", e.getMessage());
+            }
+            proxy.close();
+            proxy.setError(t);
+            log.error("failed to complete scan of session: " + session, t);
+        }
+
+        @Override
+        public void onCompleted() {
+            completed.set(true);
+            proxy.close();
+        }
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/NotifyingExecutor.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/NotifyingExecutor.java
new file mode 100644
index 0000000..491ad94
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/NotifyingExecutor.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.HgStoreNotice;
+import org.apache.hugegraph.store.client.type.HgNodeStatus;
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+import org.apache.hugegraph.store.grpc.common.ResStatus;
+import org.apache.hugegraph.store.grpc.session.FeedbackRes;
+import org.apache.hugegraph.store.grpc.session.PartitionFaultResponse;
+import org.apache.hugegraph.store.grpc.session.PartitionFaultType;
+import org.apache.hugegraph.store.grpc.session.PartitionLeader;
+
+import com.google.protobuf.util.JsonFormat;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * 2021/11/18
+ *
+ * @version 0.3.0 on 2022/01/27
+ */
+@Slf4j
+final class NotifyingExecutor {
+
+    private final String graphName;
+    private final HgStoreNodeManager nodeManager;
+    private final HgStoreNodeSession nodeSession;
+
+    private Map<PartitionFaultType, Consumer<PartitionFaultResponse>> partitionFaultHandlers;
+
+    NotifyingExecutor(String graphName, HgStoreNodeManager nodeManager,
+                      HgStoreNodeSession nodeSession) {
+        this.graphName = graphName;
+        this.nodeManager = nodeManager;
+        this.nodeSession = nodeSession;
+    }
+
+    private void initHandler() {
+        this.partitionFaultHandlers = new HashMap<>();
+
+        this.partitionFaultHandlers.put(
+                PartitionFaultType.PARTITION_FAULT_TYPE_NOT_LEADER, notifyPartitionLeaderConsumer()
+        );
+
+    }
+
+    <T> Optional<T> invoke(Supplier<FeedbackRes> supplier, Function<FeedbackRes, T> okFunction) {
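+        // Execute the gRPC call, map an OK response through okFunction, and surface errors and
+        // failure responses as HgStoreClientException, notifying the node manager where applicable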
+        FeedbackRes res = null;
+
+        try {
+            res = supplier.get();
+        } catch (Throwable t) {
+            log.error("Failed to invoke: " + supplier + ", caused by:", t);
+            handleErr(t);
+            throw err(t);
+        }
+
+        if (log.isDebugEnabled()) {
+            log.debug("gRPC [{}] status: {}"
+                    , this.nodeSession.getStoreNode().getAddress(), res.getStatus().getCode());
+        }
+
+        Optional<T> option = null;
+
+        switch (res.getStatus().getCode()) {
+            case RES_CODE_OK:
+                option = Optional.of(okFunction.apply(res));
+                break;
+            case RES_CODE_FAIL:
+                handleFail(res);
+                break;
+            case RES_CODE_NOT_EXIST:
+                break;
+            case RES_CODE_EXCESS:
+                normalFail(res);
+                break;
+            default:
+                log.error("gRPC [{}] status-msg: {}"
+                        , nodeSession.getStoreNode().getAddress(), res.getStatus().getMsg());
+        }
+
+        if (option == null) {
+            option = Optional.empty();
+        }
+
+        return option;
+    }
+
+    private void handleErr(Throwable t) {
+        try {
+            notifyErrConsumer(HgNodeStatus.NOT_WORK).accept(t);
+        } catch (Throwable tt) {
+            log.error("Failed to notify error to HgStoreNodeNotifier, cause:", tt);
+        }
+    }
+
+    private void handleFail(FeedbackRes feedbackRes) {
+        Supplier<HgStoreClientException> exSup;
+
+        if (
+                (exSup = handlePartitionFault(feedbackRes)) != null
+                // add more fault-handler here.
+                || (exSup = defaultExceptionSupplier(feedbackRes)) != null
+        ) {
+            throw exSup.get();
+        }
+
+    }
+
+    private void normalFail(FeedbackRes res) {
+        ResStatus status = res.getStatus();
+        HgStoreClientException ex;
+        try {
+            String msg = JsonFormat.printer().omittingInsignificantWhitespace()
+                                   .print(res);
+            ex = err(msg);
+        } catch (Exception e) {
+            ex = err(status.getCode() + ", " + status.getMsg());
+        }
+        throw ex;
+    }
+
+    private Supplier<HgStoreClientException> defaultExceptionSupplier(FeedbackRes feedbackRes) {
+        return () -> HgStoreClientException.of(err(feedbackRes.getStatus().getMsg()));
+    }
+
+    private Supplier<HgStoreClientException> handlePartitionFault(
+            FeedbackRes feedbackRes) {
+        PartitionFaultResponse res = feedbackRes.getPartitionFaultResponse();
+        if (res == null) {
+            return null;
+        }
+        if (this.partitionFaultHandlers == null) {
+            initHandler();
+        }
+        Consumer<PartitionFaultResponse> consumer =
+                this.partitionFaultHandlers.get(res.getFaultType());
+        if (consumer == null) {
+            consumer = notifyPartitionConsumer();
+        }
+        String msg = res.toString();
+        if (msg == null || msg.length() == 0) {
+            msg = feedbackRes.getStatus().getMsg();
+        }
+        consumer.accept(res);
+        String finalMsg = msg;
+        return () -> HgStoreClientException.of(
+                err(res.getFaultType() + ", " +
+                    finalMsg));
+    }
+
+    private HgStoreClientException err(String msg) {
+        return err(msg, null);
+    }
+
+    private HgStoreClientException err(Throwable t) {
+        return err(t.getMessage(), t);
+    }
+
+    private HgStoreClientException err(String reason, Throwable t) {
+        StringBuilder builder = new StringBuilder().append(
+                "{sessionInfo: {" + this.nodeSession.toString() +
+                "}, reason: ");
+        if (reason.startsWith("{")) {
+            builder.append(reason);
+        } else {
+            builder.append("\"").append(reason).append("\"");
+        }
+        String msg = builder.append("}").toString();
+        if (t != null) {
+            return HgStoreClientException.of(msg, t);
+        }
+        return HgStoreClientException.of(msg);
+    }
+
+    private Consumer<PartitionFaultResponse> notifyPartitionLeaderConsumer() {
+        return res -> {
+            log.info("partitions' leaders have changed: [partitionId - leaderId]");
+            nodeManager.notifying(
+                    this.graphName,
+                    HgStoreNotice.of(this.nodeSession.getStoreNode().getNodeId(),
+                                     HgNodeStatus.NOT_PARTITION_LEADER)
+                                 .setPartitionLeaders(
+                                         res.getPartitionLeadersList()
+                                            .stream()
+                                            .peek((e) -> {
+                                                      log.info("[{} - {}]", e.getPartitionId(),
+                                                               e.getLeaderId());
+                                                  }
+                                            )
+                                            .collect(
+                                                    Collectors.toMap(
+                                                            PartitionLeader::getPartitionId,
+                                                            PartitionLeader::getLeaderId
+                                                    )
+                                            )
+                                 )
+            );
+        };
+    }
+
+    private Consumer<PartitionFaultResponse> notifyPartitionConsumer() {
+        return notifyPartitionConsumer(HgNodeStatus.PARTITION_COMMON_FAULT);
+    }
+
+    private Consumer<PartitionFaultResponse> notifyPartitionConsumer(HgNodeStatus status) {
+        return res -> {
+            nodeManager.notifying(
+                    this.graphName,
+                    HgStoreNotice.of(this.nodeSession.getStoreNode().getNodeId(), status)
+                                 .setPartitionIds(res.getPartitionIdsList())
+            );
+        };
+    }
+
+    private Consumer<Throwable> notifyErrConsumer(HgNodeStatus status) {
+        return t -> {
+            nodeManager.notifying(
+                    this.graphName,
+                    HgStoreNotice.of(this.nodeSession.getStoreNode().getNodeId(), status,
+                                     t.getMessage())
+            );
+        };
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/ScanUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/ScanUtil.java
new file mode 100644
index 0000000..289e65f
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/ScanUtil.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.grpc.common.Header;
+
+import com.google.protobuf.ByteString;
+
+public class ScanUtil {
+
+    public static Header getHeader(HgStoreNodeSession nodeSession) {
+        return Header.newBuilder().setGraph(nodeSession.getGraphName()).build();
+    }
+
+    public static HgOwnerKey toOk(HgOwnerKey key) {
+        return key == null ? HgStoreClientConst.EMPTY_OWNER_KEY : key;
+    }
+
+    public static ByteString toBs(byte[] bytes) {
+        return ByteString.copyFrom((bytes != null) ? bytes : HgStoreClientConst.EMPTY_BYTES);
+    }
+
+    public static ByteString getHgOwnerKey(HgOwnerKey ownerKey) {
+        return toBs(toOk(ownerKey).getKey());
+    }
+
+    public static byte[] getQuery(byte[] query) {
+        return query != null ? query : HgStoreClientConst.EMPTY_BYTES;
+    }
+
+    public static long getLimit(long limit) {
+        return limit <= HgStoreClientConst.NO_LIMIT ? Integer.MAX_VALUE : limit;
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SeekAbleIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SeekAbleIterator.java
new file mode 100644
index 0000000..d807891
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SeekAbleIterator.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Iterator;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.store.HgSeekAble;
+import org.apache.hugegraph.store.client.util.HgAssert;
+
+/**
+ * 2022/3/11
+ */
+class SeekAbleIterator<E> implements Iterator<E>, HgSeekAble {
+
+    private final Iterator<E> iterator;
+    private final Consumer<byte[]> seeker;
+    private final Supplier<byte[]> positioner;
+
+    private SeekAbleIterator(Iterator<E> iterator, Supplier<byte[]> positioner,
+                             Consumer<byte[]> seeker) {
+        this.iterator = iterator;
+        this.positioner = positioner;
+        this.seeker = seeker;
+    }
+
+    public static <E> SeekAbleIterator<E> of(Iterator<E> iterator, Supplier<byte[]> positioner,
+                                              Consumer<byte[]> seeker) {
+        HgAssert.isArgumentNotNull(iterator, "iterator");
+        HgAssert.isArgumentNotNull(positioner, "positioner");
+        HgAssert.isArgumentNotNull(seeker, "seeker");
+        return new SeekAbleIterator<>(iterator, positioner, seeker);
+    }
+
+    @Override
+    public byte[] position() {
+        return this.positioner.get();
+    }
+
+    @Override
+    public void seek(byte[] position) {
+        this.seeker.accept(position);
+    }
+
+    @Override
+    public boolean hasNext() {
+        return this.iterator.hasNext();
+    }
+
+    @Override
+    public E next() {
+        return this.iterator.next();
+    }
+}
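
A minimal sketch (not part of the patch; SeekAbleIterator is package-private, so this would live in the same package): wrapping a plain list iterator and tracking the last returned element as the position. The position/seek wiring here is made up for illustration.

    package org.apache.hugegraph.store.client.grpc;

    import java.util.Iterator;
    import java.util.List;

    class SeekAbleIteratorSketch {

        public static void main(String[] args) {
            List<byte[]> data = List.of(new byte[]{1}, new byte[]{2, 3});
            byte[][] last = new byte[1][];

            Iterator<byte[]> it = new Iterator<>() {
                private final Iterator<byte[]> delegate = data.iterator();

                @Override
                public boolean hasNext() {
                    return delegate.hasNext();
                }

                @Override
                public byte[] next() {
                    return last[0] = delegate.next();
                }
            };

            SeekAbleIterator<byte[]> seekable =
                    SeekAbleIterator.of(it, () -> last[0], pos -> { /* re-open the scan at pos */ });

            while (seekable.hasNext()) {
                seekable.next();
            }
            System.out.println(seekable.position().length); // 2: the last element's bytes
        }
    }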
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SwitchingExecutor.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SwitchingExecutor.java
new file mode 100644
index 0000000..4281481
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SwitchingExecutor.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Optional;
+import java.util.function.Supplier;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/**
+ * 2021/12/1
+ */
+@ThreadSafe
+final class SwitchingExecutor {
+
+    private SwitchingExecutor() {
+    }
+
+    static SwitchingExecutor of() {
+        return new SwitchingExecutor();
+    }
+
+    <T> Optional<T> invoke(Supplier<Boolean> switcher, Supplier<T> trueSupplier,
+                           Supplier<T> falseSupplier) {
+        // Pick the supplier according to the switcher; a null result maps to Optional.empty().
+        return Optional.ofNullable(switcher.get() ? trueSupplier.get() : falseSupplier.get());
+    }
+}
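
A minimal sketch (not part of the patch; the class is package-private, so it would live in the same package): the switcher decides which supplier runs, e.g. choosing between a paged and a one-shot scan. The labels are illustrative.

    package org.apache.hugegraph.store.client.grpc;

    import java.util.Optional;

    class SwitchingExecutorSketch {

        public static void main(String[] args) {
            SwitchingExecutor executor = SwitchingExecutor.of();
            boolean paging = true;

            Optional<String> result = executor.invoke(
                    () -> paging,
                    () -> "scan with paging",
                    () -> "scan in one shot");

            System.out.println(result.orElse("<empty>")); // scan with paging
        }
    }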
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgNodeStatus.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgNodeStatus.java
new file mode 100644
index 0000000..374e240
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgNodeStatus.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.type;
+
+/**
+ * created on 2021/10/26
+ */
+public enum HgNodeStatus {
+
+    UNKNOWN(0, "UNKNOWN"),
+    NOT_EXIST(100, "NOT_EXIST"),    // Failed to obtain an instance by node-id from the NodeManager.
+    NOT_ONLINE(105, "NOT_ONLINE"),  // Failed to connect to the Store-Node on the first attempt.
+    NOT_WORK(110, "NOT_WORK"),      // The Store-Node no longer works.
+
+    PARTITION_COMMON_FAULT(200, "PARTITION_COMMON_FAULT"),
+    // When a Store-Node is not the leader of a specific partition.
+    NOT_PARTITION_LEADER(205, "NOT_PARTITION_LEADER");
+
+    private final int status;
+    private final String name;
+
+    HgNodeStatus(int status, String name) {
+        this.status = status;
+        this.name = name;
+    }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgStoreClientException.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgStoreClientException.java
new file mode 100644
index 0000000..d35298c
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgStoreClientException.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.type;
+
+/**
+ * created on 2021/10/27
+ */
+public class HgStoreClientException extends RuntimeException {
+
+    public HgStoreClientException(String msg) {
+        super(msg);
+    }
+
+    public HgStoreClientException(Throwable cause) {
+        super(cause);
+    }
+
+    public HgStoreClientException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public static HgStoreClientException of(String msg) {
+        return new HgStoreClientException(msg);
+    }
+
+    public static HgStoreClientException of(String msg, Throwable cause) {
+        return new HgStoreClientException(msg, cause);
+    }
+
+    public static HgStoreClientException of(Throwable cause) {
+        return new HgStoreClientException(cause);
+    }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/Base58.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/Base58.java
new file mode 100644
index 0000000..7fbe8c4
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/Base58.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * TODO: verify the origin and license of this code (~78% match with an existing Base58
+ * implementation, possibly Google's); confirm attribution later
+ */
+public class Base58 {
+
+    public static final char[] ALPHABET =
+            "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz".toCharArray();
+    private static final int[] INDEXES = new int[128];
+
+    static {
+        for (int i = 0; i < INDEXES.length; i++) {
+            INDEXES[i] = -1;
+        }
+        for (int i = 0; i < ALPHABET.length; i++) {
+            INDEXES[ALPHABET[i]] = i;
+        }
+    }
+
+    /**
+     * Encodes the given bytes in base58. No checksum is appended.
+     */
+    public static String encode(byte[] input) {
+        if (input.length == 0) {
+            return "";
+        }
+        input = copyOfRange(input, 0, input.length);
+        // Count leading zeroes.
+        int zeroCount = 0;
+        while (zeroCount < input.length && input[zeroCount] == 0) {
+            ++zeroCount;
+        }
+        // The actual encoding.
+        byte[] temp = new byte[input.length * 2];
+        int j = temp.length;
+
+        int startAt = zeroCount;
+        while (startAt < input.length) {
+            byte mod = divmod58(input, startAt);
+            if (input[startAt] == 0) {
+                ++startAt;
+            }
+            temp[--j] = (byte) ALPHABET[mod];
+        }
+
+        // Strip extra '1' if there are some after decoding.
+        while (j < temp.length && temp[j] == ALPHABET[0]) {
+            ++j;
+        }
+        // Add as many leading '1' as there were leading zeros.
+        while (--zeroCount >= 0) {
+            temp[--j] = (byte) ALPHABET[0];
+        }
+
+        byte[] output = copyOfRange(temp, j, temp.length);
+        return new String(output, StandardCharsets.US_ASCII);
+    }
+
+    public static byte[] decode(String input) throws IllegalArgumentException {
+        if (input.length() == 0) {
+            return new byte[0];
+        }
+        byte[] input58 = new byte[input.length()];
+        // Transform the String to a base58 byte sequence
+        for (int i = 0; i < input.length(); ++i) {
+            char c = input.charAt(i);
+
+            int digit58 = -1;
+            if (c >= 0 && c < 128) {
+                digit58 = INDEXES[c];
+            }
+            if (digit58 < 0) {
+                throw new IllegalArgumentException("Illegal character " + c + " at " + i);
+            }
+
+            input58[i] = (byte) digit58;
+        }
+        // Count leading zeroes
+        int zeroCount = 0;
+        while (zeroCount < input58.length && input58[zeroCount] == 0) {
+            ++zeroCount;
+        }
+        // The encoding
+        byte[] temp = new byte[input.length()];
+        int j = temp.length;
+
+        int startAt = zeroCount;
+        while (startAt < input58.length) {
+            byte mod = divmod256(input58, startAt);
+            if (input58[startAt] == 0) {
+                ++startAt;
+            }
+
+            temp[--j] = mod;
+        }
+        // Do not add extra leading zeroes, move j to the first non-null byte.
+        while (j < temp.length && temp[j] == 0) {
+            ++j;
+        }
+
+        return copyOfRange(temp, j - zeroCount, temp.length);
+    }
+
+    public static BigInteger decodeToBigInteger(String input) throws IllegalArgumentException {
+        return new BigInteger(1, decode(input));
+    }
+
+    //
+    // number -> number / 58, returns number % 58
+    //
+    private static byte divmod58(byte[] number, int startAt) {
+        int remainder = 0;
+        for (int i = startAt; i < number.length; i++) {
+            int digit256 = (int) number[i] & 0xFF;
+            int temp = remainder * 256 + digit256;
+
+            number[i] = (byte) (temp / 58);
+
+            remainder = temp % 58;
+        }
+
+        return (byte) remainder;
+    }
+
+    //
+    // number -> number / 256, returns number % 256
+    //
+    private static byte divmod256(byte[] number58, int startAt) {
+        int remainder = 0;
+        for (int i = startAt; i < number58.length; i++) {
+            int digit58 = (int) number58[i] & 0xFF;
+            int temp = remainder * 58 + digit58;
+
+            number58[i] = (byte) (temp / 256);
+
+            remainder = temp % 256;
+        }
+
+        return (byte) remainder;
+    }
+
+    private static byte[] copyOfRange(byte[] source, int from, int to) {
+        byte[] range = new byte[to - from];
+        System.arraycopy(source, from, range, 0, range.length);
+
+        return range;
+    }
+
+}
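
Round-trip sketch (not part of the patch), showing that leading zero bytes survive encoding as leading '1' characters:

    import java.util.Arrays;

    import org.apache.hugegraph.store.client.util.Base58;

    public class Base58RoundTrip {

        public static void main(String[] args) {
            byte[] raw = {0, 0, 13, 42, (byte) 255};

            String encoded = Base58.encode(raw);
            byte[] decoded = Base58.decode(encoded);

            System.out.println(encoded);                     // starts with "11"
            System.out.println(Arrays.equals(raw, decoded)); // true
        }
    }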
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/ExecutorPool.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/ExecutorPool.java
new file mode 100644
index 0000000..006ee38
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/ExecutorPool.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class ExecutorPool {
+
+    public static ThreadFactory newThreadFactory(String namePrefix) {
+        HgAssert.isArgumentNotNull(namePrefix, "namePrefix");
+        return new DefaultThreadFactory(namePrefix);
+    }
+
+    public static ThreadPoolExecutor createExecutor(String name, long keepAliveTime,
+                                                    int coreThreads, int maxThreads) {
+        return new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
+                                      new SynchronousQueue<>(),
+                                      newThreadFactory(name),
+                                      new ThreadPoolExecutor.CallerRunsPolicy()
+        );
+    }
+
+    public static class DefaultThreadFactory implements ThreadFactory {
+
+        private final AtomicInteger threadNumber = new AtomicInteger(1);
+        private final String namePrefix;
+
+        public DefaultThreadFactory(String threadNamePrefix) {
+            this.namePrefix = threadNamePrefix + "-";
+        }
+
+        @Override
+        public Thread newThread(Runnable r) {
+            Thread t = new Thread(null, r, namePrefix + threadNumber.getAndIncrement(), 0);
+            t.setDaemon(true);
+            t.setPriority(Thread.NORM_PRIORITY);
+            return t;
+        }
+    }
+}
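
Usage sketch (not part of the patch); the pool name and sizes are illustrative. Because the pool is backed by a SynchronousQueue with CallerRunsPolicy, work beyond maxThreads runs on the submitting thread instead of queueing.

    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    import org.apache.hugegraph.store.client.util.ExecutorPool;

    public class ExecutorPoolSketch {

        public static void main(String[] args) throws InterruptedException {
            // Daemon threads named "store-client-1", "store-client-2", ...
            ThreadPoolExecutor executor = ExecutorPool.createExecutor("store-client", 60, 2, 4);

            for (int i = 0; i < 8; i++) {
                int n = i;
                executor.submit(() -> System.out.println(
                        Thread.currentThread().getName() + " -> task " + n));
            }

            executor.shutdown();
            executor.awaitTermination(5, TimeUnit.SECONDS);
        }
    }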
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgAssert.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgAssert.java
new file mode 100644
index 0000000..7da6a91
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgAssert.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.function.Supplier;
+
+public final class HgAssert {
+
+    public static void isTrue(boolean expression, String message) {
+        if (message == null) {
+            throw new IllegalArgumentException("message is null");
+        }
+        if (!expression) {
+            throw new IllegalArgumentException(message);
+        }
+    }
+
+    public static void isTrue(boolean expression, Supplier<String> msg) {
+        if (msg == null) {
+            throw new IllegalArgumentException("message supplier is null");
+        }
+        if (!expression) {
+            throw new IllegalArgumentException(msg.get());
+        }
+    }
+
+    public static void isFalse(boolean expression, String message) {
+        isTrue(!expression, message);
+    }
+
+    public static void isFalse(boolean expression, Supplier<String> msg) {
+        isTrue(!expression, msg);
+    }
+
+    public static void isArgumentValid(byte[] bytes, String parameter) {
+        isFalse(isInvalid(bytes), () -> "The argument is invalid: " + parameter);
+    }
+
+    public static void isArgumentValid(String str, String parameter) {
+        isFalse(isInvalid(str), () -> "The argument is invalid: " + parameter);
+    }
+
+    public static void isArgumentValid(Collection<?> collection, String parameter) {
+        isFalse(isInvalid(collection), () -> "The argument is invalid: " + parameter);
+    }
+
+    public static void isArgumentNotNull(Object obj, String parameter) {
+        isTrue(obj != null, () -> "The argument is null: " + parameter);
+    }
+
+    public static void istValid(byte[] bytes, String msg) {
+        isFalse(isInvalid(bytes), msg);
+    }
+
+    public static void isValid(String str, String msg) {
+        isFalse(isInvalid(str), msg);
+    }
+
+    public static void isNotNull(Object obj, String msg) {
+        isTrue(obj != null, msg);
+    }
+
+    public static boolean isContains(Object[] objs, Object obj) {
+        if (objs == null || objs.length == 0 || obj == null) {
+            return false;
+        }
+        for (Object item : objs) {
+            if (obj.equals(item)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public static boolean isInvalid(String... strs) {
+        if (strs == null || strs.length == 0) {
+            return true;
+        }
+        for (String item : strs) {
+            if (item == null || "".equals(item.trim())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public static boolean isInvalid(byte[] bytes) {
+        return bytes == null || bytes.length == 0;
+    }
+
+    public static boolean isInvalid(Map<?, ?> map) {
+        return map == null || map.isEmpty();
+    }
+
+    public static boolean isInvalid(Collection<?> list) {
+        return list == null || list.isEmpty();
+    }
+
+    public static <T> boolean isContains(Collection<T> list, T item) {
+        if (list == null || item == null) {
+            return false;
+        }
+        return list.contains(item);
+    }
+
+    public static boolean isNull(Object... objs) {
+        if (objs == null) {
+            return true;
+        }
+        for (Object item : objs) {
+            if (item == null) {
+                return true;
+            }
+        }
+        return false;
+    }
+}
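
Usage sketch (not part of the patch); the table name and keys are illustrative only.

    import java.util.List;

    import org.apache.hugegraph.store.client.util.HgAssert;

    public class HgAssertSketch {

        static void put(String table, byte[] key, byte[] value) {
            HgAssert.isArgumentValid(table, "table");
            HgAssert.isArgumentValid(key, "key");
            HgAssert.isArgumentNotNull(value, "value");
            // ... perform the put ...
        }

        public static void main(String[] args) {
            put("g+v", new byte[]{1}, new byte[]{2});                         // passes
            System.out.println(HgAssert.isInvalid("", " "));                  // true: blank strings
            System.out.println(HgAssert.isContains(List.of("a", "b"), "b"));  // true

            try {
                put("g+v", new byte[0], new byte[]{2});                       // empty key
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage()); // The argument is invalid: key
            }
        }
    }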
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgBufferProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgBufferProxy.java
new file mode 100644
index 0000000..9d62d36
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgBufferProxy.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import javax.annotation.CheckForNull;
+
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * 2022/3/15
+ *
+ * @version 0.1.0
+ */
+@Slf4j
+public final class HgBufferProxy<T> {
+
+    private final BlockingQueue<Supplier<T>> queue;
+    private final AtomicBoolean closed = new AtomicBoolean(false);
+    private final ReentrantLock lock = new ReentrantLock();
+    private final Runnable task;
+    private Throwable err;
+
+    private HgBufferProxy(Runnable task) {
+        this.task = task;
+        this.queue = new LinkedBlockingQueue<>();
+    }
+
+    public static <T> HgBufferProxy<T> of(Runnable task) {
+        HgAssert.isArgumentNotNull(task, "task");
+        return new HgBufferProxy<>(task);
+    }
+
+    public void send(T t) {
+        if (t == null) {
+            throw new IllegalArgumentException("the argument t is null");
+        }
+        if (this.closed.get()) {
+            return;
+        }
+        this.lock.lock();
+        try {
+            this.queue.offer(() -> t);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    private void apply() {
+        this.lock.lock();
+        try {
+            if (!this.closed.get()) {
+                this.task.run();
+                Thread.yield();
+            }
+        } finally {
+            this.lock.unlock();
+        }
+    }
+
+    /**
+     * Return an item from the channel.
+     *
+     * @return null when the channel has been closed
+     * @throws RuntimeException when polling fails, times out without a callback,
+     *                          or a stream error has been recorded
+     */
+    @CheckForNull
+    public T receive(int time, Consumer<Integer> callback) {
+        Supplier<T> s;
+        if (this.closed.get()) {
+            s = this.queue.poll();
+            this.checkErr();
+            return s != null ? s.get() : null;
+        }
+        if (this.queue.size() <= 1) {
+            this.apply();
+        }
+        lock.lock();
+        try {
+            if (this.isClosed()) {
+                s = this.queue.poll();
+                this.checkErr();
+                return s != null ? s.get() : null;
+            }
+        } finally {
+            lock.unlock();
+        }
+        try {
+            s = this.queue.poll(time, TimeUnit.SECONDS);
+        } catch (Throwable t) {
+            log.error("Failed to receive an item from the channel, caused by: ", t);
+            throw HgStoreClientException.of(t);
+        }
+        if (s == null) {
+            if (this.closed.get()) {
+                s = this.queue.poll();
+            } else {
+                if (callback == null) {
+                    throw new RuntimeException("timeout, max time: " + time + " seconds");
+                } else {
+                    callback.accept(time);
+                }
+            }
+        }
+        this.checkErr();
+        return s != null ? s.get() : null;
+    }
+
+    public boolean isClosed() {
+        return this.closed.get();
+    }
+
+    /**
+     * @throws RuntimeException when failing to close the channel
+     */
+    public void close() {
+        if (this.closed.get()) {
+            return;
+        }
+        lock.lock();
+        this.closed.set(true);
+        try {
+            this.queue.offer(() -> null);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    public void setError(Throwable streamErr) {
+        this.err = streamErr;
+    }
+
+    private void checkErr() {
+        if (this.err != null) {
+            throw HgStoreClientException.of(this.err);
+        }
+    }
+}
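
A minimal sketch (not part of the patch), assuming the Runnable passed to of() is meant to fetch more data and push it back through send(); the page naming is made up.

    import org.apache.hugegraph.store.client.util.HgBufferProxy;

    public class HgBufferProxySketch {

        public static void main(String[] args) {
            @SuppressWarnings("unchecked")
            HgBufferProxy<String>[] holder = new HgBufferProxy[1];

            // receive() invokes the task when the buffer runs low; the task replies via send().
            holder[0] = HgBufferProxy.of(() -> holder[0].send("next-page"));
            HgBufferProxy<String> proxy = holder[0];

            System.out.println(proxy.receive(5, null)); // next-page (waits up to 5 seconds)

            proxy.close();
            System.out.println(proxy.receive(5, null)); // null after close
        }
    }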
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConfig.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConfig.java
new file mode 100644
index 0000000..a121f1e
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConfig.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.util.PropertyResourceBundle;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * 2021/11/29
+ */
+@Slf4j
+public final class HgStoreClientConfig {
+
+    private static final int GRPC_DEFAULT_TIMEOUT_SECONDS = 100;
+    private static final int GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024;
+    private static final int GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024;
+
+    private static final int NET_KV_SCANNER_PAGE_SIZE = 10_000;
+    private static final int NET_KV_SCANNER_HAVE_NEXT_TIMEOUT = 30 * 60;
+    private static final String fileName = "hg-store-client";
+    private static PropertyResourceBundle prb = null;
+    private static HgStoreClientConfig defaultInstance;
+    private Integer grpcTimeoutSeconds = GRPC_DEFAULT_TIMEOUT_SECONDS;
+    private Integer grpcMaxInboundMessageSize = GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE;
+    private Integer grpcMaxOutboundMessageSize = GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE;
+    private Integer netKvScannerPageSize = NET_KV_SCANNER_PAGE_SIZE;
+    private Integer netKvScannerHaveNextTimeout = NET_KV_SCANNER_HAVE_NEXT_TIMEOUT;
+
+    private HgStoreClientConfig() {
+    }
+
+    public synchronized static HgStoreClientConfig of() {
+
+        if (defaultInstance != null) {
+            return defaultInstance;
+        }
+
+        defaultInstance = new HgStoreClientConfig();
+
+        overrideViaProperties(defaultInstance);
+
+        return defaultInstance;
+    }
+
+    private static void overrideViaProperties(HgStoreClientConfig config) {
+        try {
+            prb = (PropertyResourceBundle) PropertyResourceBundle.getBundle(fileName);
+        } catch (Throwable t) {
+            log.warn("Failed to load " + fileName + ".properties.");
+            log.info("Default configuration was activated.");
+            return;
+        }
+        PropertiesWrapper wrapper = new PropertiesWrapper(prb);
+
+        log.info("grpc.timeout.seconds = "
+                 + (config.grpcTimeoutSeconds = wrapper.getInt("grpc.timeout.seconds"
+                , config.grpcTimeoutSeconds))
+        );
+        log.info("grpc.max.inbound.message.size = "
+                 + (config.grpcMaxInboundMessageSize = wrapper.getInt("grpc.max.inbound.message.size"
+                , config.grpcMaxInboundMessageSize))
+        );
+        log.info("grpc.max.outbound.message.size = "
+                 + (config.grpcMaxOutboundMessageSize = wrapper.getInt("grpc.max.outbound.message.size"
+                , config.grpcMaxOutboundMessageSize))
+        );
+        log.info("net.kv.scanner.page.size = "
+                 + (config.netKvScannerPageSize = wrapper.getInt("net.kv.scanner.page.size"
+                , config.netKvScannerPageSize))
+        );
+        log.info("net.kv.scanner.have.next.timeout = "
+                 + (config.netKvScannerHaveNextTimeout = wrapper.getInt("net.kv.scanner.have.next.timeout"
+                , config.netKvScannerHaveNextTimeout))
+        );
+    }
+
+    public Integer getGrpcTimeoutSeconds() {
+        return grpcTimeoutSeconds;
+    }
+
+    public HgStoreClientConfig setGrpcTimeoutSeconds(Integer grpcTimeoutSeconds) {
+        this.grpcTimeoutSeconds = grpcTimeoutSeconds;
+        return this;
+    }
+
+    public Integer getGrpcMaxInboundMessageSize() {
+        return grpcMaxInboundMessageSize;
+    }
+
+    public HgStoreClientConfig setGrpcMaxInboundMessageSize(Integer grpcMaxInboundMessageSize) {
+        this.grpcMaxInboundMessageSize = grpcMaxInboundMessageSize;
+        return this;
+    }
+
+    public Integer getGrpcMaxOutboundMessageSize() {
+        return grpcMaxOutboundMessageSize;
+    }
+
+    public HgStoreClientConfig setGrpcMaxOutboundMessageSize(Integer grpcMaxOutboundMessageSize) {
+        this.grpcMaxOutboundMessageSize = grpcMaxOutboundMessageSize;
+        return this;
+    }
+
+    public Integer getNetKvScannerPageSize() {
+        return netKvScannerPageSize;
+    }
+
+    public HgStoreClientConfig setNetKvScannerPageSize(Integer netKvScannerPageSize) {
+        this.netKvScannerPageSize = netKvScannerPageSize;
+        return this;
+    }
+
+    public Integer getNetKvScannerHaveNextTimeout() {
+        return netKvScannerHaveNextTimeout;
+    }
+
+    public HgStoreClientConfig setNetKvScannerHaveNextTimeout(Integer netKvScannerHaveNextTimeout) {
+        this.netKvScannerHaveNextTimeout = netKvScannerHaveNextTimeout;
+        return this;
+    }
+
+    private static class PropertiesWrapper {
+
+        private final PropertyResourceBundle prb;
+
+        PropertiesWrapper(PropertyResourceBundle prb) {
+            this.prb = prb;
+        }
+
+        Integer getInt(String key, Integer defaultValue) {
+
+            String buf = this.getStr(key);
+            if (buf == null || buf.isEmpty()) {
+                return defaultValue;
+            }
+
+            Integer res = defaultValue;
+            try {
+                res = Integer.valueOf(buf);
+            } catch (Throwable t) {
+                log.error("Failed to parse an int value[ " + buf + " ] of the key[ " + key +
+                          " ], falling back to the default.", t);
+            }
+
+            return res;
+
+        }
+
+        String getStr(String key, String defaultValue) {
+            String res = getStr(key);
+
+            if (res == null && defaultValue != null) {
+                return defaultValue;
+            }
+
+            return res;
+        }
+
+        String getStr(String key) {
+            String value = null;
+
+            if (!prb.containsKey(key)) {
+                return null;
+            }
+
+            try {
+                value = prb.getString(key);
+            } catch (Exception e) {
+                log.warn("Failed to get value with key: [" + key + "]");
+                return null;
+            }
+
+            if (value != null) {
+                value = value.trim();
+            }
+
+            return value;
+        }
+    }
+}
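
Usage sketch (not part of the patch): values come from hg-store-client.properties on the classpath when present, otherwise the built-in defaults (100 s gRPC timeout, 1 GB message sizes, 10,000-row scanner pages).

    import org.apache.hugegraph.store.client.util.HgStoreClientConfig;

    public class HgStoreClientConfigSketch {

        public static void main(String[] args) {
            HgStoreClientConfig config = HgStoreClientConfig.of();

            System.out.println(config.getGrpcTimeoutSeconds());        // 100 unless overridden
            System.out.println(config.getGrpcMaxInboundMessageSize()); // 1073741824 by default
            System.out.println(config.getNetKvScannerPageSize());      // 10000 unless overridden
        }
    }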
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConst.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConst.java
new file mode 100644
index 0000000..8f8543f
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConst.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hugegraph.store.HgKvStore;
+import org.apache.hugegraph.store.HgOwnerKey;
+
+public final class HgStoreClientConst {
+
+    public final static String DEFAULT_NODE_CLUSTER_ID = "default-node-cluster";
+
+    public final static String EMPTY_STRING = "";
+    public final static String EMPTY_TABLE = "";
+    public final static byte[] EMPTY_BYTES = new byte[0];
+    public final static byte[] MAX_BYTES = new byte[]{(byte) 0b11111111};
+    public final static List EMPTY_LIST = Collections.EMPTY_LIST;
+
+    // An empty owner means dispatching to all partitions.
+    public final static byte[] ALL_PARTITION_OWNER = new byte[0];
+    public final static HgOwnerKey EMPTY_OWNER_KEY = HgOwnerKey.of(EMPTY_BYTES, EMPTY_BYTES);
+    public final static HgOwnerKey ALL_PARTITION_OWNER_KEY =
+            HgOwnerKey.of(ALL_PARTITION_OWNER, ALL_PARTITION_OWNER);
+
+    //public final static int SCAN_GTE_BEGIN_LT_END = SCAN_GTE_BEGIN | SCAN_LT_END;
+    public final static int SCAN_TYPE_RANGE = HgKvStore.SCAN_GTE_BEGIN | HgKvStore.SCAN_LTE_END;
+    public final static int SCAN_TYPE_ANY = HgKvStore.SCAN_ANY;
+    public final static int NO_LIMIT = 0;
+
+    public final static int TX_SESSIONS_MAP_CAPACITY = 32;
+    public static final int NODE_MAX_RETRYING_TIMES = 10;
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientUtil.java
new file mode 100644
index 0000000..5032d5a
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientUtil.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/10/14
+ */
+@Slf4j
+public final class HgStoreClientUtil {
+
+    public static HgOwnerKey toOwnerKey(byte[] key) {
+        return new HgOwnerKey(HgStoreClientConst.EMPTY_BYTES, key);
+    }
+
+    public static HgOwnerKey toOwnerKey(String key) {
+        return new HgOwnerKey(HgStoreClientConst.EMPTY_BYTES, toBytes(key));
+    }
+
+    public static HgOwnerKey toAllNodeKey(String key) {
+        return new HgOwnerKey(HgStoreClientConst.ALL_PARTITION_OWNER, toBytes(key));
+    }
+
+    public static HgOwnerKey toOwnerKey(String owner, String key) {
+        return new HgOwnerKey(toBytes(owner), toBytes(key));
+    }
+
+    public static HgStoreClientException err(String msg) {
+        log.error(msg);
+        return HgStoreClientException.of(msg);
+    }
+
+    public static boolean isValid(HgOwnerKey key) {
+        if (key == null) {
+            return false;
+        }
+        if (key.getKey() == null) {
+            return false;
+        }
+        return key.getKey().length != 0;
+    }
+
+    public static String toStr(byte[] b) {
+        if (b == null) {
+            return "";
+        }
+        if (b.length == 0) {
+            return "";
+        }
+        return new String(b, StandardCharsets.UTF_8);
+    }
+
+    public static String toByteStr(byte[] b) {
+        if (b == null) {
+            return "";
+        }
+        if (b.length == 0) {
+            return "";
+        }
+        return Arrays.toString(b);
+    }
+
+    public static String toStr(HgOwnerKey ownerKey) {
+        if (ownerKey == null) {
+            return "";
+        }
+        return "{ " +
+               "owner: " + Arrays.toString(ownerKey.getOwner()) +
+               ", key: " + toStr(ownerKey.getKey()) +
+               " }";
+    }
+
+    public static byte[] toBytes(String str) {
+        if (str == null) {
+            return null;
+        }
+        return str.getBytes(StandardCharsets.UTF_8);
+    }
+
+    public static byte[] toBytes(long l) {
+        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
+        buffer.putLong(l);
+        return buffer.array();
+    }
+
+    public static byte[] toIntBytes(final int i) {
+        ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES);
+        buffer.putInt(i);
+        return buffer.array();
+    }
+
+    public static long toLong(byte[] bytes) {
+        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
+        buffer.put(bytes);
+        buffer.flip();//need flip
+        return buffer.getLong();
+    }
+
+    public static int toInt(byte[] bytes) {
+        ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES);
+        buffer.put(bytes);
+        buffer.flip();//need flip
+        return buffer.getInt();
+    }
+
+    public static String getHostAddress() {
+        String res = null;
+
+        try {
+            res = InetAddress.getLocalHost().getHostAddress();
+        } catch (UnknownHostException e) {
+            log.error("Failed to get the local host address", e);
+            res = "";
+        }
+
+        return res;
+    }
+
+    public static byte[] combine(byte[] first, byte[] second) {
+        if (first == null) {
+            first = HgStoreClientConst.EMPTY_BYTES;
+        }
+
+        if (second == null) {
+            second = HgStoreClientConst.EMPTY_BYTES;
+        }
+
+        byte[] result = new byte[first.length + second.length];
+        System.arraycopy(first, 0, result, 0, first.length);
+        System.arraycopy(second, 0, result, first.length, second.length);
+        return result;
+    }
+
+    public static void printCallStack(String txt, Throwable ex) {
+        StackTraceElement[] stackElements = ex.getStackTrace();
+        StringBuilder sb = new StringBuilder();
+        sb.append(txt).append(":\n");
+        if (stackElements != null) {
+            for (int i = 0; i < stackElements.length; i++) {
+                sb.append(stackElements[i].getClassName()).append(" : ")
+                  .append(stackElements[i].getMethodName()).append(" [ ");
+                sb.append(stackElements[i].getLineNumber()).append(" ]\n");
+
+            }
+            sb.append(
+                    "--------------------------------------------------------------------------------------\n");
+        }
+        log.error(sb.toString());
+    }
+}
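
Usage sketch (not part of the patch); the key and owner values are illustrative.

    import org.apache.hugegraph.store.HgOwnerKey;
    import org.apache.hugegraph.store.client.util.HgStoreClientUtil;

    public class HgStoreClientUtilSketch {

        public static void main(String[] args) {
            // String <-> byte[] helpers.
            byte[] key = HgStoreClientUtil.toBytes("vertex:1");
            System.out.println(HgStoreClientUtil.toStr(key));   // vertex:1

            // long <-> byte[] round trip.
            System.out.println(HgStoreClientUtil.toLong(HgStoreClientUtil.toBytes(42L))); // 42

            // Owner keys with an explicit owner.
            HgOwnerKey ownerKey = HgStoreClientUtil.toOwnerKey("owner-1", "vertex:1");
            System.out.println(HgStoreClientUtil.toStr(ownerKey));

            // Concatenation treats null as empty.
            byte[] combined = HgStoreClientUtil.combine(key, null);
            System.out.println(combined.length == key.length);  // true
        }
    }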
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgUuid.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgUuid.java
new file mode 100644
index 0000000..fd83fef
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgUuid.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+public final class HgUuid {
+
+    private static String encode(UUID uuid) {
+        ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
+        bb.putLong(uuid.getMostSignificantBits());
+        bb.putLong(uuid.getLeastSignificantBits());
+        return Base58.encode(bb.array());
+    }
+
+    /**
+     * Generate a random UUID encoded in Base58 form.
+     *
+     * @return the Base58-encoded UUID string
+     */
+    public static String newUUID() {
+        return encode(UUID.randomUUID());
+    }
+
+}
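
Usage sketch (not part of the patch): a 128-bit random UUID rendered as a compact Base58 string (about 22 characters).

    import org.apache.hugegraph.store.client.util.HgUuid;

    public class HgUuidSketch {

        public static void main(String[] args) {
            System.out.println(HgUuid.newUUID()); // e.g. a 21-22 character Base58 string
        }
    }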
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/MetricX.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/MetricX.java
new file mode 100644
index 0000000..99d4df1
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/MetricX.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * 2022/1/29
+ */
+public class MetricX {
+
+    // Total working time consumed by Iterators
+    public static AtomicLong iteratorSum = new AtomicLong();
+    // Num of Iterators
+    public static AtomicLong iteratorCount = new AtomicLong();
+    // Max working time consumed by Iterators
+    public static AtomicLong iteratorMax = new AtomicLong();
+    public AtomicLong failureCount = new AtomicLong();
+    // Used together to record a single task's elapsed time
+    private long start;
+    private long end;
+
+    private MetricX(long start) {
+        this.start = start;
+    }
+
+    public static MetricX ofStart() {
+        return new MetricX(System.currentTimeMillis());
+    }
+
+    public static void plusIteratorWait(long nanoSeconds) {
+        iteratorSum.addAndGet(nanoSeconds);
+        iteratorCount.getAndIncrement();
+        if (iteratorMax.get() < nanoSeconds) {
+            iteratorMax.set(nanoSeconds);
+        }
+    }
+
+    /**
+     * Total iterator waiting time.
+     *
+     * @return milliseconds
+     */
+    public static long getIteratorWait() {
+        return iteratorSum.get() / 1_000_000;
+    }
+
+    /**
+     * Average iterator waiting time.
+     *
+     * @return milliseconds
+     */
+    public static long getIteratorWaitAvg() {
+        if (iteratorCount.get() == 0) {
+            return -1;
+        }
+        return getIteratorWait() / iteratorCount.get();
+    }
+
+    /**
+     * Maximum iterator waiting time.
+     *
+     * @return milliseconds
+     */
+    public static long getIteratorWaitMax() {
+        return iteratorMax.get() / 1_000_000;
+    }
+
+    public static long getIteratorCount() {
+        return iteratorCount.get();
+    }
+
+    public long start() {
+        return this.start = System.currentTimeMillis();
+    }
+
+    public long end() {
+        return this.end = System.currentTimeMillis();
+    }
+
+    public long past() {
+        return this.end - this.start;
+    }
+
+    public void countFail() {
+        this.failureCount.getAndIncrement();
+    }
+
+    public long getFailureCount() {
+        return this.failureCount.get();
+    }
+
+}
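
Usage sketch (not part of the patch): per-task timing plus the global iterator-wait counters, which are recorded in nanoseconds and reported in milliseconds.

    import org.apache.hugegraph.store.client.util.MetricX;

    public class MetricXSketch {

        public static void main(String[] args) throws InterruptedException {
            MetricX metric = MetricX.ofStart();
            Thread.sleep(20);                  // stand-in for a store call
            metric.end();
            System.out.println("task took " + metric.past() + " ms");

            metric.countFail();
            System.out.println("failures: " + metric.getFailureCount()); // 1

            MetricX.plusIteratorWait(3_000_000);  // 3 ms in nanoseconds
            MetricX.plusIteratorWait(5_000_000);  // 5 ms in nanoseconds
            System.out.println(MetricX.getIteratorWait() + " ms total, "
                               + MetricX.getIteratorWaitAvg() + " ms avg, "
                               + MetricX.getIteratorWaitMax() + " ms max");
        }
    }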
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/PropertyUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/PropertyUtil.java
new file mode 100644
index 0000000..c59eed2
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/PropertyUtil.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.util;
+
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PropertyUtil {
+
+    private static final Logger LOG = LoggerFactory.getLogger(PropertyUtil.class);
+
+    public static String get(String key) {
+        return get(key, null);
+    }
+
+    public static String get(final String key, String def) {
+        if (key == null) {
+            throw new NullPointerException("key");
+        }
+        if (key.isEmpty()) {
+            throw new IllegalArgumentException("key must not be empty.");
+        }
+
+        String value = null;
+        try {
+            if (System.getSecurityManager() == null) {
+                value = System.getProperty(key);
+            } else {
+                value = AccessController.doPrivileged(
+                        (PrivilegedAction<String>) () -> System.getProperty(key));
+            }
+        } catch (Exception e) {
+            LOG.error("Failed to read the system property: " + key, e);
+        }
+
+        if (value == null) {
+            return def;
+        }
+
+        return value;
+    }
+
+    public static boolean getBoolean(String key, boolean def) {
+        String value = get(key, Boolean.toString(def));
+        value = value.trim().toLowerCase();
+        if (value.isEmpty()) {
+            return true;
+        }
+
+        if ("true".equals(value) || "yes".equals(value) || "1".equals(value)) {
+            return true;
+        }
+
+        if ("false".equals(value) || "no".equals(value) || "0".equals(value)) {
+            return false;
+        }
+        return def;
+    }
+
+    public static int getInt(String key, int def) {
+        String value = get(key);
+        if (value == null) {
+            return def;
+        }
+
+        value = value.trim().toLowerCase();
+        try {
+            return Integer.parseInt(value);
+        } catch (Exception e) {
+            LOG.warn("Failed to parse an int value of the key: " + key, e);
+        }
+        return def;
+    }
+
+    public static Object setProperty(String key, String value) {
+        return System.getProperties().setProperty(key, value);
+    }
+
+}
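
Usage sketch (not part of the patch); the property keys are illustrative and would normally be passed as -D JVM options.

    import org.apache.hugegraph.store.client.util.PropertyUtil;

    public class PropertyUtilSketch {

        public static void main(String[] args) {
            PropertyUtil.setProperty("store.client.scan.batch", "5000");
            PropertyUtil.setProperty("store.client.scan.async", "yes");

            System.out.println(PropertyUtil.getInt("store.client.scan.batch", 1000));      // 5000
            System.out.println(PropertyUtil.getBoolean("store.client.scan.async", false)); // true
            System.out.println(PropertyUtil.get("store.client.missing", "fallback"));      // fallback
        }
    }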
diff --git a/hugegraph-store/hg-store-client/src/main/resources/hg-store-client.properties b/hugegraph-store/hg-store-client/src/main/resources/hg-store-client.properties
new file mode 100644
index 0000000..aa967e8
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/resources/hg-store-client.properties
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#grpc.timeout.seconds=10
+#grpc.max.inbound.message.size=
+#grpc.max.outbound.message.size=
+#net.kv.scanner.page.size = 2000
+#Unit:second
+#net.kv.scanner.have.next.timeout=60
diff --git a/hugegraph-store/hg-store-client/src/main/resources/log4j2.xml b/hugegraph-store/hg-store-client/src/main/resources/log4j2.xml
new file mode 100644
index 0000000..6c64f58
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/resources/log4j2.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<!-- Config will be auto loaded every 60s -->
+<configuration status="error" monitorInterval="60">
+    <properties>
+        <property name="LOG_PATH">logs</property>
+        <property name="FILE_NAME">hg-store-client</property>
+    </properties>
+
+    <appenders>
+        <Console name="console" target="SYSTEM_OUT">
+            <ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+            <!--JsonLayout compact="false" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}"/>
+            </JsonLayout-->
+        </Console>
+
+        <!-- Normal server log config -->
+        <RollingRandomAccessFile name="file" fileName="${LOG_PATH}/${FILE_NAME}.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/${FILE_NAME}-%d{yyyy-MM-dd}-%i.log"
+                                 bufferedIO="false" immediateFlush="true">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+            <!--JsonLayout compact="true" eventEol="true" complete="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}"/>
+            </JsonLayout-->
+            <!-- Trigger after exceeding 1day or 50MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="50MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep 5 files per day & auto Delete after over 2GB or 100 files -->
+            <DefaultRolloverStrategy max="5">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <!-- Limit log amount & size -->
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="2GB" />
+                        <IfAccumulatedFileCount exceeds="100" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+
+        <!-- Separate & compress audit log, buffer size is 512KB -->
+        <RollingRandomAccessFile name="audit" fileName="${LOG_PATH}/audit-${FILE_NAME}.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/audit-${FILE_NAME}-%d{yyyy-MM-dd-HH}-%i.gz"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <!-- Use simple format for audit log to speed up -->
+            <!-- PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} - %m%n"/ -->
+            <JsonLayout compact="true" eventEol="true" complete="false" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}" />
+            </JsonLayout>
+            <!-- Trigger after exceeding 1hour or 500MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="500MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep 2 files per hour & auto Delete [after 60 days] or [over 5GB or 500 files] -->
+            <DefaultRolloverStrategy max="2">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.gz" />
+                    <IfLastModified age="60d" />
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="5GB" />
+                        <IfAccumulatedFileCount exceeds="500" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+    </appenders>
+
+    <loggers>
+        <logger name="io.grpc.netty" level="ERROR" additivity="false">
+            <appender-ref ref="file" />
+        </logger>
+        <root level="ERROR">
+            <appender-ref ref="console" />
+            <appender-ref ref="file" />
+        </root>
+
+    </loggers>
+</configuration>