feat(pd): integrate `pd-service` into hugegraph (#2528)

subtask of #2265

For detailed module analysis documentation, please refer to the fs doc/link/wiki

TODO: Update the `getPomVersion` implementation in the common module

---------

Co-authored-by: imbajin <jin@apache.org>
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
index abdcac4..cb38c6e 100644
--- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
@@ -17,9 +17,11 @@
 
 package org.apache.hugegraph.pd.client;
 
+import org.apache.hugegraph.pd.common.Useless;
 import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
 import org.apache.hugegraph.pd.grpc.discovery.Query;
 
+@Useless("discovery related")
 public interface Discoverable {
 
     NodeInfos getNodeInfos(Query query);
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
index 7a9f28c..c307b96 100644
--- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
@@ -28,6 +28,7 @@
 import java.util.function.Function;
 
 import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.Useless;
 import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc;
 import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
 import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
@@ -38,6 +39,7 @@
 import io.grpc.ManagedChannelBuilder;
 import lombok.extern.slf4j.Slf4j;
 
+@Useless("discovery related")
 @Slf4j
 public abstract class DiscoveryClient implements Closeable, Discoverable {
 
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
index 0ded328..049ca17 100644
--- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
@@ -20,9 +20,11 @@
 import java.util.Map;
 import java.util.function.Consumer;
 
+import org.apache.hugegraph.pd.common.Useless;
 import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
 import org.apache.hugegraph.pd.grpc.discovery.RegisterType;
 
+@Useless("discovery related")
 public class DiscoveryClientImpl extends DiscoveryClient {
 
     private final String id;
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
index a96185e..268ccb6 100644
--- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
@@ -18,6 +18,7 @@
 package org.apache.hugegraph.pd.client;
 
 import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.Useless;
 import org.apache.hugegraph.pd.grpc.PDGrpc;
 import org.apache.hugegraph.pd.grpc.Pdpb;
 
@@ -27,6 +28,7 @@
 import io.grpc.stub.AbstractStub;
 import lombok.extern.slf4j.Slf4j;
 
+@Useless("license related")
 @Slf4j
 public class LicenseClient extends AbstractClient {
 
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Useless.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Useless.java
new file mode 100644
index 0000000..ec000f7
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Useless.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * The "Useless" annotation indicates that the annotated object can be safely removed without
+ * affecting existing functionality, including objects that are only referenced in tests.
+ */
+@Target({ElementType.FIELD, ElementType.METHOD, ElementType.CONSTRUCTOR, ElementType.TYPE})
+@Retention(RetentionPolicy.SOURCE)
+public @interface Useless {
+
+    String value() default "Remove or handle it later";
+}
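
Since the annotation is retained only in source (`RetentionPolicy.SOURCE`), it adds zero runtime cost and acts purely as a marker for later cleanup. A minimal sketch of how it can be applied (the annotated members here are hypothetical):

```java
import org.apache.hugegraph.pd.common.Useless;

public class Example {

    // Uses the default reason: "Remove or handle it later"
    @Useless
    private int legacyCounter;

    // An explicit reason, matching the usages in this PR
    @Useless("discovery related")
    public void legacyDiscoveryHook() {
        // only referenced by tests, so it can be deleted safely
    }
}
```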
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java
index 78bfe34..e774539 100644
--- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java
@@ -23,6 +23,7 @@
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.Useless;
 import org.apache.hugegraph.pd.config.PDConfig;
 import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
 import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
@@ -30,6 +31,7 @@
 
 import lombok.extern.slf4j.Slf4j;
 
+@Useless("discovery related")
 @Slf4j
 public class DiscoveryMetaStore extends MetadataRocksDBStore {
 
diff --git a/hugegraph-pd/hg-pd-service/pom.xml b/hugegraph-pd/hg-pd-service/pom.xml
new file mode 100644
index 0000000..682daba
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/pom.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>hg-pd-service</artifactId>
+
+    <properties>
+        <jetcd-version>0.5.10</jetcd-version>
+    </properties>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-grpc</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>io.grpc</groupId>
+                    <artifactId>*</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-core</artifactId>
+            <version>${revision}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>io.github.lognet</groupId>
+            <artifactId>grpc-spring-boot-starter</artifactId>
+            <version>4.5.5</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.springframework.boot</groupId>
+                    <artifactId>spring-boot-starter-logging</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-web</artifactId>
+            <version>2.5.14</version>
+            <exclusions>
+                <!-- Exclude the Tomcat dependency -->
+                <exclusion>
+                    <groupId>org.springframework.boot</groupId>
+                    <artifactId>spring-boot-starter-tomcat</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.springframework.boot</groupId>
+                    <artifactId>spring-boot-starter-logging</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-actuator</artifactId>
+            <version>2.5.14</version>
+        </dependency>
+        <dependency>
+            <groupId>io.micrometer</groupId>
+            <artifactId>micrometer-registry-prometheus</artifactId>
+            <version>1.7.12</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-jetty</artifactId>
+            <version>2.5.14</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-log4j2</artifactId>
+            <version>2.5.14</version>
+            <exclusions>
+                <exclusion>
+                    <artifactId>log4j-slf4j-impl</artifactId>
+                    <groupId>org.apache.logging.log4j</groupId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>com.lmax</groupId>
+            <artifactId>disruptor</artifactId>
+            <version>3.4.1</version>
+        </dependency>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.24</version>
+        </dependency>
+        <!-- https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java-util -->
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java-util</artifactId>
+            <version>3.17.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hugegraph-common</artifactId>
+            <version>1.2.0</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-api</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.7</version>
+            <scope>compile</scope>
+        </dependency>
+
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-maven-plugin</artifactId>
+                <version>2.5.0</version>
+                <executions>
+                    <execution>
+                        <configuration>
+                            <mainClass>
+                                org.apache.hugegraph.pd.boot.HugePDServer
+                            </mainClass>
+                        </configuration>
+                        <!-- should configure explicitly without spring-boot-starter-parent -->
+                        <goals>
+                            <goal>repackage</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java
new file mode 100644
index 0000000..8150052
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.boot;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.context.annotation.ComponentScan;
+
+import com.alipay.remoting.util.StringUtils;
+
+/**
+ * PD service bootstrap class
+ */
+@ComponentScan(basePackages = {"org.apache.hugegraph.pd"})
+@SpringBootApplication
+public class HugePDServer {
+
+    public static void main(String[] args) {
+        String logPath = System.getProperty("logging.path");
+        if (StringUtils.isBlank(logPath)) {
+            // TODO: enhance logging configuration
+            System.setProperty("logging.path", "logs");
+            System.setProperty("com.alipay.remoting.client.log.level", "error");
+        }
+
+        SpringApplication.run(HugePDServer.class, args);
+        System.out.println("HugeGraph-PD started.");
+    }
+}
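
Because the log path is read from a system property before Spring starts, deployments can override it without touching the packaged defaults. A hedged sketch of a wrapper launcher (the path below is an assumption, not a project default):

```java
import org.apache.hugegraph.pd.boot.HugePDServer;

public class PDServerLauncher {

    public static void main(String[] args) {
        // Hypothetical override: write logs somewhere other than ./logs
        System.setProperty("logging.path", "/var/log/hugegraph-pd");
        HugePDServer.main(args);
    }
}
```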
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java
new file mode 100644
index 0000000..71aead6
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.metrics;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import io.micrometer.core.instrument.MeterRegistry;
+
+@Configuration
+public class MetricsConfig {
+
+    @Autowired
+    private PDMetrics metrics;
+
+    @Bean
+    public MeterRegistryCustomizer<MeterRegistry> metricsCommonTags() {
+        return (registry) -> registry.config().commonTags("hg", "pd");
+    }
+
+    @Bean
+    public MeterRegistryCustomizer<MeterRegistry> registerMeters() {
+        return (registry) -> {
+            metrics.init(registry);
+        };
+    }
+
+}
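
`MeterRegistryCustomizer` beans run against every auto-configured registry before meters are bound, so the `("hg", "pd")` tag pair ends up on all exported series. A standalone sketch of the same effect using only micrometer-core, without Spring:

```java
import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class CommonTagsDemo {

    public static void main(String[] args) {
        SimpleMeterRegistry registry = new SimpleMeterRegistry();
        // Equivalent of the metricsCommonTags() customizer, applied by hand
        registry.config().commonTags("hg", "pd");

        Counter counter = registry.counter("requests.total");
        counter.increment();
        // Every meter now carries the common tag pair
        System.out.println(counter.getId().getTags()); // [tag(hg=pd)]
    }
}
```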
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java
new file mode 100644
index 0000000..427d19c
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.metrics;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+import io.micrometer.core.instrument.Gauge;
+import io.micrometer.core.instrument.MeterRegistry;
+import lombok.extern.slf4j.Slf4j;
+
+@Component
+@Slf4j
+public final class PDMetrics {
+
+    public static final String PREFIX = "hg";
+    private static final AtomicLong GRAPHS = new AtomicLong(0);
+    private MeterRegistry registry;
+
+    @Autowired
+    private PDService pdService;
+
+    public synchronized void init(MeterRegistry meterRegistry) {
+
+        if (registry == null) {
+            registry = meterRegistry;
+            registerMeters();
+        }
+
+    }
+
+    private void registerMeters() {
+        Gauge.builder(PREFIX + ".up", () -> 1).register(registry);
+
+        Gauge.builder(PREFIX + ".graphs", this::updateGraphs)
+             .description("Number of graphs registered in PD")
+             .register(registry);
+
+        Gauge.builder(PREFIX + ".stores", this::updateStores)
+             .description("Number of stores registered in PD")
+             .register(registry);
+
+    }
+
+    private long updateGraphs() {
+        long buf = getGraphs();
+
+        if (buf != GRAPHS.get()) {
+            GRAPHS.set(buf);
+            registerGraphMetrics();
+        }
+        return buf;
+    }
+
+    private long updateStores() {
+        return getStores();
+    }
+
+    private long getGraphs() {
+        return getGraphMetas().size();
+    }
+
+    private long getStores() {
+        try {
+            return this.pdService.getStoreNodeService().getStores(null).size();
+        } catch (PDException e) {
+            log.error(e.getMessage(), e);
+        }
+        return 0;
+    }
+
+    private List<Metapb.Graph> getGraphMetas() {
+        try {
+            return this.pdService.getPartitionService().getGraphs();
+        } catch (PDException e) {
+            log.error(e.getMessage(), e);
+        }
+        return Collections.emptyList();
+    }
+
+    private void registerGraphMetrics() {
+        this.getGraphMetas().forEach(meta -> {
+            Gauge.builder(PREFIX + ".partitions", this.pdService.getPartitionService(),
+                          e -> e.getPartitions(meta.getGraphName()).size())
+                 .description("Number of partitions assigned to a graph")
+                 .tag("graph", meta.getGraphName())
+                 .register(this.registry);
+
+        });
+    }
+
+}
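
Micrometer gauges are sampled lazily: the function passed to `Gauge.builder` is invoked on every scrape, which is why `updateGraphs()` can re-register per-graph meters as a side effect of being read. A minimal sketch of that sampling behavior:

```java
import java.util.concurrent.atomic.AtomicLong;

import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class GaugeSamplingDemo {

    public static void main(String[] args) {
        SimpleMeterRegistry registry = new SimpleMeterRegistry();
        AtomicLong graphs = new AtomicLong(3); // stand-in for the PD graph count

        Gauge.builder("hg.graphs", graphs::get)
             .description("Number of graphs registered in PD")
             .register(registry);

        graphs.set(5); // the gauge is read at scrape time, not registration time
        System.out.println(registry.get("hg.graphs").gauge().value()); // 5.0
    }
}
```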
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java
new file mode 100644
index 0000000..fab6d70
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import java.util.Objects;
+
+public class DemoModel {
+
+    private int status;
+    private String text;
+
+    public DemoModel(int status, String text) {
+        this.status = status;
+        this.text = text;
+    }
+
+    public int getStatus() {
+        return status;
+    }
+
+    public DemoModel setStatus(int status) {
+        this.status = status;
+        return this;
+    }
+
+    public String getText() {
+        return text;
+    }
+
+    public DemoModel setText(String text) {
+        this.text = text;
+        return this;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        DemoModel that = (DemoModel) o;
+        return status == that.status && Objects.equals(text, that.text);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(status, text);
+    }
+
+    @Override
+    public String toString() {
+        return "HgNodeStatus{" +
+               "status=" + status +
+               ", text='" + text + '\'' +
+               '}';
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java
new file mode 100644
index 0000000..809713a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import lombok.Data;
+
+@Data
+public class GraphRestRequest {
+
+    private int partitionCount;
+    private int shardCount;
+}
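
Lombok's `@Data` generates the getters, setters, `equals`/`hashCode`, and `toString` at compile time, so a controller can bind and use the request without boilerplate:

```java
import org.apache.hugegraph.pd.model.GraphRestRequest;

public class RequestDemo {

    public static void main(String[] args) {
        GraphRestRequest request = new GraphRestRequest();
        request.setPartitionCount(12);
        request.setShardCount(3);
        // Lombok-generated toString: GraphRestRequest(partitionCount=12, shardCount=3)
        System.out.println(request);
    }
}
```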
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java
new file mode 100644
index 0000000..da64168
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import lombok.Data;
+
+@Data
+public class GraphSpaceRestRequest {
+
+    private Long storageLimit;
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java
new file mode 100644
index 0000000..8509b8a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import lombok.Data;
+
+@Data
+public class PeerRestRequest {
+
+    private String peerList;
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java
new file mode 100644
index 0000000..9ed16f9
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+public class PromTargetsModel {
+
+    private static final String LABEL_METRICS_PATH = "__metrics_path__";
+    private static final String LABEL_SCHEME = "__scheme__";
+    private static final String LABEL_JOB_NAME = "job";
+    private static final String LABEL_CLUSTER = "cluster";
+    private final Map<String, String> labels = new HashMap<>();
+    private Set<String> targets = new HashSet<>();
+
+    private PromTargetsModel() {
+    }
+
+    public static PromTargetsModel of() {
+        return new PromTargetsModel();
+    }
+
+    public Set<String> getTargets() {
+        return targets;
+    }
+
+    public PromTargetsModel setTargets(Set<String> targets) {
+        if (targets != null) {
+            this.targets = targets;
+        }
+        return this;
+    }
+
+    public Map<String, String> getLabels() {
+        return labels;
+    }
+
+    public PromTargetsModel addTarget(String target) {
+        if (target == null) {
+            return this;
+        }
+        this.targets.add(target);
+        return this;
+    }
+
+    public PromTargetsModel setMetricsPath(String path) {
+        return this.addLabel(LABEL_METRICS_PATH, path);
+    }
+
+    public PromTargetsModel setScheme(String scheme) {
+        return this.addLabel(LABEL_SCHEME, scheme);
+    }
+
+    public PromTargetsModel setClusterId(String clusterId) {
+        return this.addLabel(LABEL_CLUSTER, clusterId);
+    }
+
+    public PromTargetsModel addLabel(String label, String value) {
+        if (label == null || value == null) {
+            return this;
+        }
+        this.labels.put(label, value);
+        return this;
+    }
+
+    @Override
+    public String toString() {
+        return "PromTargetModel{" +
+               "targets=" + targets +
+               ", labels=" + labels +
+               '}';
+    }
+}
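
`__metrics_path__` and `__scheme__` are the reserved relabeling labels Prometheus understands in HTTP service-discovery responses, so a `/targets`-style endpoint can emit this model directly. A hedged usage sketch (the address and cluster id are made up):

```java
import org.apache.hugegraph.pd.model.PromTargetsModel;

public class TargetsDemo {

    public static void main(String[] args) {
        PromTargetsModel model = PromTargetsModel.of()
                .addTarget("10.0.0.1:8620")              // hypothetical PD address
                .setMetricsPath("/actuator/prometheus")  // where the actuator exposes metrics
                .setScheme("http")
                .setClusterId("pd-cluster-1");           // hypothetical cluster id
        // As JSON this becomes one entry of an http_sd target list:
        // {"targets":["10.0.0.1:8620"],"labels":{"__metrics_path__":"/actuator/prometheus",...}}
        System.out.println(model);
    }
}
```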
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java
new file mode 100644
index 0000000..371fb35
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import java.util.HashMap;
+
+import lombok.Data;
+
+@Data
+public class RegistryQueryRestRequest {
+
+    String appName;
+    String version;
+    HashMap<String, String> labels;
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java
new file mode 100644
index 0000000..8319db1
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import java.util.HashMap;
+
+import lombok.Data;
+
+@Data
+public class RegistryRestRequest {
+
+    String id;
+    String appName;
+    String version;
+    String address;
+    String interval;
+    HashMap<String, String> labels;
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java
new file mode 100644
index 0000000..32956eb
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import java.io.Serializable;
+
+import org.apache.hugegraph.pd.grpc.Pdpb;
+
+import lombok.Data;
+
+@Data
+public class RegistryRestResponse {
+
+    Pdpb.ErrorType errorType;
+    String message;
+    Serializable data;
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java
new file mode 100644
index 0000000..8aef76b
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import java.util.HashMap;
+
+import org.apache.hugegraph.pd.grpc.Pdpb;
+
+import lombok.Data;
+
+@Data
+public class RestApiResponse {
+
+    String message;
+    Object data;
+    int status;
+
+    public RestApiResponse(Object data, Pdpb.ErrorType status, String message) {
+        if (data == null) {
+            data = new HashMap<String, Object>();
+        }
+        this.data = data;
+        this.status = status.getNumber();
+        this.message = message;
+    }
+
+    public RestApiResponse() {
+
+    }
+
+    public RestApiResponse(Object data, int status, String message) {
+        if (data == null) {
+            data = new HashMap<String, Object>();
+        }
+        this.data = data;
+        this.status = status;
+        this.message = message;
+    }
+}
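
Both parameterized constructors normalize a `null` payload into an empty map, so JSON clients always receive a `data` object rather than `null`. A quick sketch:

```java
import org.apache.hugegraph.pd.grpc.Pdpb;
import org.apache.hugegraph.pd.model.RestApiResponse;

public class ResponseDemo {

    public static void main(String[] args) {
        RestApiResponse ok = new RestApiResponse(null, Pdpb.ErrorType.OK, "OK");
        // data was normalized to an empty HashMap instead of staying null
        System.out.println(ok.getData());   // {}
        System.out.println(ok.getStatus()); // the numeric code of ErrorType.OK, assumed 0
    }
}
```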
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java
new file mode 100644
index 0000000..6fa8dd1
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import lombok.Data;
+
+@Data
+public class StoreRestRequest {
+
+    String storeState;
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java
new file mode 100644
index 0000000..617c702
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.model;
+
+import lombok.Data;
+
+@Data
+public class TimeRangeRequest {
+
+    String startTime;
+    String endTime;
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java
new file mode 100644
index 0000000..9644e78
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.notice;
+
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.pd.common.HgAssert;
+
+import lombok.extern.slf4j.Slf4j;
+
+// TODO: merge/move to another package
+@Slf4j
+public class NoticeBroadcaster {
+
+    private final Supplier<Long> noticeSupplier;
+    private long noticeId;
+    private String durableId;
+    private Supplier<String> durableSupplier;
+    private Function<String, Boolean> removeFunction;
+    private int state; //0=ready; 1=notified; 2=done ack; -1=error
+    private int counter;
+    private long timestamp;
+
+    private NoticeBroadcaster(Supplier<Long> noticeSupplier) {
+        this.noticeSupplier = noticeSupplier;
+        this.timestamp = System.currentTimeMillis();
+    }
+
+    public static NoticeBroadcaster of(Supplier<Long> noticeSupplier) {
+        HgAssert.isArgumentNotNull(noticeSupplier, "noticeSupplier");
+        return new NoticeBroadcaster(noticeSupplier);
+    }
+
+    public NoticeBroadcaster setDurableSupplier(Supplier<String> durableSupplier) {
+        this.durableSupplier = durableSupplier;
+        return this;
+    }
+
+    public NoticeBroadcaster setRemoveFunction(Function<String, Boolean> removeFunction) {
+        this.removeFunction = removeFunction;
+        return this;
+    }
+
+    public NoticeBroadcaster notifying() {
+
+        if (this.state >= 2) {
+            log.warn("Aborted notifying as ack has done. notice: {}", this);
+            return this;
+        }
+
+        this.counter++;
+
+        if (this.durableId == null && this.durableSupplier != null) {
+            try {
+                this.durableId = this.durableSupplier.get();
+            } catch (Throwable t) {
+                log.error("Failed to invoke durableSupplier, cause by:", t);
+            }
+        }
+
+        try {
+            this.noticeId = this.noticeSupplier.get();
+            state = 1;
+        } catch (Throwable t) {
+            state = -1;
+            log.error("Failed to invoke noticeSupplier: {}; cause by: " +
+                      this.noticeSupplier.toString(), t);
+        }
+
+        return this;
+    }
+
+    public boolean checkAck(long ackNoticeId) {
+        boolean flag = false;
+
+        if (this.noticeId == ackNoticeId) {
+            flag = true;
+            this.state = 2;
+        }
+
+        if (flag) {
+            this.doRemoveDurable();
+        }
+
+        return flag;
+    }
+
+    public boolean doRemoveDurable() {
+        log.info("Removing NoticeBroadcaster is stating, noticeId:{}, durableId: {}"
+                , this.noticeId, this.durableId);
+        boolean flag = false;
+
+        if (this.removeFunction == null) {
+            log.warn("The remove-function hasn't been set.");
+            return false;
+        }
+
+        if (this.durableId == null) {
+            log.warn("The durableId hasn't been set.");
+            return false;
+        }
+
+        try {
+            if (!(flag = this.removeFunction.apply(this.durableId))) {
+                log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}"
+                        , this.noticeId, this.durableId);
+            }
+        } catch (Throwable t) {
+            log.error("Failed to remove NoticeBroadcaster, noticeId: "
+                      + this.noticeId + ", durableId: " + this.durableId + ". Cause by:", t);
+        }
+
+        return flag;
+    }
+
+    public long getNoticeId() {
+        return noticeId;
+    }
+
+    public int getState() {
+        return state;
+    }
+
+    public int getCounter() {
+        return counter;
+    }
+
+    public String getDurableId() {
+        return durableId;
+    }
+
+    public void setDurableId(String durableId) {
+
+        if (HgAssert.isInvalid(durableId)) {
+            log.warn("Set an invalid durable-id to NoticeBroadcaster.");
+        }
+
+        this.durableId = durableId;
+    }
+
+    public long getTimestamp() {
+        return timestamp;
+    }
+
+    public void setTimestamp(long timestamp) {
+        this.timestamp = timestamp;
+    }
+
+    @Override
+    public String toString() {
+        return "NoticeBroadcaster{" +
+               "noticeId=" + noticeId +
+               ", durableId='" + durableId + '\'' +
+               ", state=" + state +
+               ", counter=" + counter +
+               ", timestamp=" + timestamp +
+               '}';
+    }
+}
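
The broadcaster ties together three pluggable steps: send the notice, persist a durable record for redelivery, and delete that record once the client acks. A minimal usage sketch with stubbed suppliers:

```java
import org.apache.hugegraph.pd.notice.NoticeBroadcaster;

public class BroadcastDemo {

    public static void main(String[] args) {
        NoticeBroadcaster broadcaster = NoticeBroadcaster
                .of(() -> 42L)                            // stub: send notice, return its id
                .setDurableSupplier(() -> "queue-item-1") // stub: persist, return durable id
                .setRemoveFunction(durableId -> true);    // stub: delete the durable record

        broadcaster.notifying();                   // state 0 -> 1, durable record written
        boolean acked = broadcaster.checkAck(42L); // state -> 2, durable record removed
        System.out.println(acked);                 // true
    }
}
```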
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java
new file mode 100644
index 0000000..431e479
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+import org.apache.hugegraph.pd.util.IdUtil;
+
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@ThreadSafe
+@Slf4j
+abstract class AbstractObserverSubject {
+
+    /* send notice to client */
+    private final Map<Long, StreamObserver<PulseResponse>> observerHolder = new HashMap<>(1024);
+    /* notice from client */
+    private final Map<Long, PulseListener> listenerHolder = new HashMap<>(1024);
+
+    private final byte[] lock = new byte[0];
+    private final PulseResponse.Builder builder = PulseResponse.newBuilder();
+    private final PulseType pulseType;
+
+    protected AbstractObserverSubject(PulseType pulseType) {
+        this.pulseType = pulseType;
+    }
+
+    /**
+     * Add an observer from remote client
+     *
+     * @param observerId
+     * @param responseObserver
+     */
+    void addObserver(Long observerId, StreamObserver<PulseResponse> responseObserver) {
+        synchronized (this.observerHolder) {
+
+            if (this.observerHolder.containsKey(observerId)) {
+                responseObserver.onError(
+                        new Exception(
+                                "The observer-id[" + observerId + "] of " + this.pulseType.name()
+                                + " subject has been existing."));
+                return;
+            }
+
+            log.info("Adding a " + this.pulseType + "'s observer, observer-id is [" + observerId +
+                     "].");
+            this.observerHolder.put(observerId, responseObserver);
+        }
+
+    }
+
+    /**
+     * Remove an observer by id
+     *
+     * @param observerId
+     * @param responseObserver
+     */
+    void removeObserver(Long observerId, StreamObserver<PulseResponse> responseObserver) {
+        synchronized (this.observerHolder) {
+            log.info("Removing a " + this.pulseType + "'s observer, observer-id is [" + observerId +
+                     "].");
+            this.observerHolder.remove(observerId);
+        }
+
+        responseObserver.onCompleted();
+    }
+
+    abstract String toNoticeString(PulseResponse res);
+
+    /**
+     * @param c
+     * @return notice ID
+     */
+    protected long notifyClient(Consumer<PulseResponse.Builder> c) {
+        synchronized (lock) {
+
+            if (c == null) {
+                log.error(this.pulseType.name() +
+                          "'s notice was abandoned, caused by: notifyObserver(null)");
+                return -1;
+            }
+
+            try {
+                c.accept(this.builder.clear());
+            } catch (Throwable t) {
+                log.error(this.pulseType.name() + "'s notice was abandoned, caused by:", t);
+                return -1;
+            }
+
+            long noticeId = IdUtil.createMillisId();
+
+            Iterator<Map.Entry<Long, StreamObserver<PulseResponse>>> iter =
+                    observerHolder.entrySet().iterator();
+
+            // long start = System.currentTimeMillis();
+            while (iter.hasNext()) {
+                Map.Entry<Long, StreamObserver<PulseResponse>> entry = iter.next();
+                Long observerId = entry.getKey();
+                PulseResponse res =
+                        this.builder.setObserverId(observerId).setNoticeId(noticeId).build();
+
+                try {
+                    entry.getValue().onNext(res);
+                } catch (Throwable e) {
+                    log.error("Failed to send " + this.pulseType.name() + "'s notice[" +
+                              toNoticeString(res)
+                              + "] to observer[" + observerId + "].", e);
+
+                    // TODO: retry several times before removing the observer?
+                    // iter.remove();
+                    log.error("Keeping the " + this.pulseType.name() + "'s observer[" +
+                              entry.getKey() + "] despite the failed send.", e);
+                }
+
+            }
+
+            // log.info("notice client: notice id: {}, ts :{}, cost: {}", noticeId, System
+            // .currentTimeMillis(),
+            //        (System.currentTimeMillis() - start )/1000);
+            return noticeId;
+        }
+
+    }
+
+    abstract long notifyClient(com.google.protobuf.GeneratedMessageV3 response);
+
+    protected void notifyError(int code, String message) {
+        synchronized (lock) {
+            Iterator<Map.Entry<Long, StreamObserver<PulseResponse>>> iter =
+                    observerHolder.entrySet().iterator();
+            while (iter.hasNext()) {
+                Map.Entry<Long, StreamObserver<PulseResponse>> entry = iter.next();
+                Long observerId = entry.getKey();
+                PulseResponse res = this.builder.setObserverId(observerId).build();
+                try {
+                    entry.getValue().onError(Status.fromCodeValue(code).withDescription(message)
+                                                   .asRuntimeException());
+                } catch (Throwable e) {
+                    log.warn("Failed to send {} 's notice[{}] to observer[{}], error:{}",
+                             this.pulseType.name(), toNoticeString(res), observerId,
+                             e.getMessage());
+                }
+            }
+        }
+    }
+
+    /**
+     * Add a listener from local server
+     *
+     * @param listenerId
+     * @param listener
+     */
+    void addListener(Long listenerId, PulseListener<?> listener) {
+        synchronized (this.listenerHolder) {
+
+            if (this.listenerHolder.containsKey(listenerId)) {
+                listener.onError(
+                        new Exception(
+                                "The listener-id[" + listenerId + "] of " + this.pulseType.name()
+                                + " subject has been existing."));
+                return;
+            }
+
+            log.info("Adding a " + this.pulseType + "'s listener, listener-id is [" + listenerId +
+                     "].");
+            this.listenerHolder.put(listenerId, listener);
+
+        }
+
+    }
+
+    /**
+     * Remove a listener by id
+     *
+     * @param listenerId
+     * @param listener
+     */
+    void removeListener(Long listenerId, PulseListener<?> listener) {
+        synchronized (this.listenerHolder) {
+            log.info("Removing a " + this.pulseType + "'s listener, listener-id is [" + listenerId +
+                     "].");
+            this.listenerHolder.remove(listenerId);
+        }
+
+        listener.onCompleted();
+    }
+
+    abstract <T> Function<PulseNoticeRequest, T> getNoticeHandler();
+
+    void handleClientNotice(PulseNoticeRequest noticeRequest) throws Exception {
+
+        Iterator<Map.Entry<Long, PulseListener>> iter = listenerHolder.entrySet().iterator();
+
+        while (iter.hasNext()) {
+            Map.Entry<Long, PulseListener> entry = iter.next();
+            Long listenerId = entry.getKey();
+            entry.getValue().onNext(getNoticeHandler().apply(noticeRequest));
+        }
+    }
+}
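
Concrete subjects only have to fill the type-specific slots: which field of `PulseResponse` to populate and how to unwrap a client notice for local listeners. A hedged sketch of a subclass, assuming the same package and the generated proto accessors for the partition-heartbeat fields:

```java
package org.apache.hugegraph.pd.pulse;

import java.util.function.Function;

import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse;
import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
import org.apache.hugegraph.pd.grpc.pulse.PulseType;

final class PartitionHeartbeatLikeSubject extends AbstractObserverSubject {

    PartitionHeartbeatLikeSubject() {
        super(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT);
    }

    @Override
    String toNoticeString(PulseResponse res) {
        return String.valueOf(res.getNoticeId());
    }

    @Override
    long notifyClient(com.google.protobuf.GeneratedMessageV3 response) {
        // Set the type-specific field on the shared builder, then broadcast
        return notifyClient(b -> b.setPartitionHeartbeatResponse(
                (PartitionHeartbeatResponse) response));
    }

    @SuppressWarnings("unchecked")
    @Override
    <T> Function<PulseNoticeRequest, T> getNoticeHandler() {
        // Unwrap the type-specific payload for local listeners
        return req -> (T) req.getPartitionHeartbeatRequest();
    }
}
```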
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java
new file mode 100644
index 0000000..6c7c218
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java
@@ -0,0 +1,444 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull;
+import static org.apache.hugegraph.pd.grpc.Pdpb.ErrorType.NOT_LEADER;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PdInstructionType;
+import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+import org.apache.hugegraph.pd.notice.NoticeBroadcaster;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.util.IdUtil;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.Parser;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@ThreadSafe
+public class PDPulseSubject {
+
+    private static final long NOTICE_EXPIRATION_TIME = 30 * 60 * 1000;
+    private static final int RETRYING_PERIOD_SECONDS = 60;
+    private static final Map<String, AbstractObserverSubject> subjectHolder =
+            new ConcurrentHashMap<>();
+    private static final ConcurrentLinkedQueue<NoticeBroadcaster> broadcasterQueue =
+            new ConcurrentLinkedQueue<>();
+    private static final ScheduledExecutorService scheduledExecutor =
+            Executors.newScheduledThreadPool(1);
+
+    private static Supplier<List<Metapb.QueueItem>> queueRetrieveFunction =
+            () -> Collections.emptyList();
+    private static Function<Metapb.QueueItem, Boolean> queueDurableFunction = (e) -> true;
+    private static Function<String, Boolean> queueRemoveFunction = (e) -> true;
+
+    static {
+        subjectHolder.put(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name(),
+                          new PartitionHeartbeatSubject());
+        subjectHolder.put(PulseType.PULSE_TYPE_PD_INSTRUCTION.name(), new PdInstructionSubject());
+        // add some other type here...
+        // ...
+    }
+
+    // Schedule the retry/expiration tasks
+    static {
+        scheduledExecutor.scheduleAtFixedRate(PDPulseSubject::doSchedule, 0,
+                                              RETRYING_PERIOD_SECONDS, TimeUnit.SECONDS);
+    }
+
+    private static void doSchedule() {
+        appendQueue();
+        expireQueue();
+        // retry all pending notices
+        broadcasterQueue.forEach(NoticeBroadcaster::notifying);
+    }
+
+    private static void appendQueue() {
+        broadcasterQueue.addAll(
+                getQueueItems()
+                        .parallelStream()
+                        .filter(e -> broadcasterQueue
+                                .stream()
+                                .noneMatch(b -> e.getItemId().equals(b.getDurableId()))
+                        ).map(e -> createBroadcaster(e))
+                        .filter(e -> e != null)
+                        .peek(e -> log.info("Appending notice: {}", e))
+                        .collect(Collectors.toList())
+        );
+    }
+
+    private static void expireQueue() {
+        broadcasterQueue.removeIf(e -> {
+            if (System.currentTimeMillis() - e.getTimestamp() >= NOTICE_EXPIRATION_TIME) {
+                log.info("Notice was expired, trying to remove, notice: {}", e);
+                return e.doRemoveDurable();
+            } else {
+                return false;
+            }
+        });
+    }
+
+    private static List<Metapb.QueueItem> getQueueItems() {
+        try {
+            return queueRetrieveFunction.get();
+        } catch (Throwable t) {
+            log.error("Failed to retrieve queue from queueRetrieveFunction, cause by:", t);
+        }
+
+        return Collections.emptyList();
+    }
+
+    public static void setQueueRetrieveFunction(
+            Supplier<List<Metapb.QueueItem>> queueRetrieveFunction) {
+        HgAssert.isArgumentNotNull(queueRetrieveFunction, "queueRetrieveFunction");
+        PDPulseSubject.queueRetrieveFunction = queueRetrieveFunction;
+    }
+
+    public static void setQueueDurableFunction(
+            Function<Metapb.QueueItem, Boolean> queueDurableFunction) {
+        HgAssert.isArgumentNotNull(queueDurableFunction, "queueDurableFunction");
+        PDPulseSubject.queueDurableFunction = queueDurableFunction;
+    }
+
+    public static void setQueueRemoveFunction(Function<String, Boolean> queueRemoveFunction) {
+        HgAssert.isArgumentNotNull(queueRemoveFunction, "queueRemoveFunction");
+        PDPulseSubject.queueRemoveFunction = queueRemoveFunction;
+    }
+
+    /**
+     * Add a client's responseObserver.
+     *
+     * @param responseObserver the client's response stream
+     * @return the request observer bound to this client stream
+     */
+    public static StreamObserver<PulseRequest> addObserver(
+            StreamObserver<PulseResponse> responseObserver) {
+        isArgumentNotNull(responseObserver, "responseObserver");
+        return new PDPulseStreamObserver(responseObserver);
+    }
+
+    /**
+     * Send a notice to the pd-client.
+     *
+     * @param responseBuilder the builder of the response to broadcast
+     */
+    public static void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) {
+        HgAssert.isArgumentNotNull(responseBuilder, "responseBuilder");
+        notifyClient(responseBuilder.build());
+    }
+
+    private static void notifyClient(PartitionHeartbeatResponse response) {
+        doBroadcast(createBroadcaster(response));
+    }
+
+    public static void notifyClient(PdInstructionResponse response) {
+        doBroadcast(createBroadcaster(response));
+    }
+
+    private static void doBroadcast(NoticeBroadcaster broadcaster) {
+        broadcasterQueue.add(broadcaster.notifying());
+    }
+
+    private static AbstractObserverSubject getSubject(PulseType pulseType) {
+        return subjectHolder.get(pulseType.name());
+    }
+
+    private static NoticeBroadcaster createBroadcaster(Metapb.QueueItem item) {
+        PartitionHeartbeatResponse notice = toNotice(item);
+        if (notice == null) {
+            return null;
+        }
+        NoticeBroadcaster res = createBroadcaster(notice);
+        res.setDurableId(item.getItemId());
+        res.setTimestamp(item.getTimestamp());
+        return res;
+    }
+
+    private static NoticeBroadcaster createBroadcaster(PartitionHeartbeatResponse notice) {
+        return NoticeBroadcaster.of(getNoticeSupplier(notice))
+                                .setDurableSupplier(getDurableSupplier(notice))
+                                .setRemoveFunction(getRemoveFunction());
+    }
+
+    private static NoticeBroadcaster createBroadcaster(PdInstructionResponse notice) {
+        return NoticeBroadcaster.of(getNoticeSupplier(notice))
+                                .setDurableSupplier(getDurableSupplier(notice))
+                                .setRemoveFunction(getRemoveFunction());
+    }
+
+    // public static Supplier<Long> getNoticeSupplier(PartitionHeartbeatResponse notice) {
+    // TODO: PartitionHeartbeatSubject.class -> T
+    //    return () -> getSubject(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT,
+    //    PartitionHeartbeatSubject.class)
+    //            .notifyClient(notice);
+    // }
+
+    public static <T extends com.google.protobuf.GeneratedMessageV3> Supplier<Long> getNoticeSupplier(
+            T notice) {
+        PulseType type;
+        if (notice instanceof PdInstructionResponse) {
+            type = PulseType.PULSE_TYPE_PD_INSTRUCTION;
+        } else if (notice instanceof PartitionHeartbeatResponse) {
+            type = PulseType.PULSE_TYPE_PARTITION_HEARTBEAT;
+        } else {
+            throw new IllegalArgumentException("Unknown pulse type " + notice.getClass().getName());
+        }
+        return () -> getSubject(type).notifyClient(notice);
+    }
+
+    private static Supplier<String> getDurableSupplier(
+            com.google.protobuf.GeneratedMessageV3 notice) {
+        return () -> {
+            Metapb.QueueItem queueItem = toQueueItem(notice);
+            String res = null;
+
+            try {
+                if (queueDurableFunction.apply(queueItem)) {
+                    res = queueItem.getItemId();
+                } else {
+                    log.error("Failed to persist queue-item that contained notice: {}", notice);
+                }
+            } catch (Throwable t) {
+                log.error("Failed to invoke queueDurableFunction, cause by:", t);
+            }
+
+            return res;
+        };
+    }
+
+    private static Function<String, Boolean> getRemoveFunction() {
+        return s -> {
+            boolean flag = false;
+
+            try {
+                flag = queueRemoveFunction.apply(s);
+            } catch (Throwable t) {
+                log.error("Failed to invoke queueRemoveFunction, cause by:", t);
+            }
+
+            return flag;
+        };
+    }
+
+    private static Metapb.QueueItem toQueueItem(com.google.protobuf.GeneratedMessageV3 notice) {
+        return Metapb.QueueItem.newBuilder()
+                               .setItemId(IdUtil.createMillisStr())
+                               .setItemClass(notice.getClass().getTypeName())
+                               .setItemContent(notice.toByteString())
+                               .setTimestamp(System.currentTimeMillis())
+                               .build();
+    }
+
+    private static PartitionHeartbeatResponse toNotice(Metapb.QueueItem item) {
+        Parser<PartitionHeartbeatResponse> parser = PartitionHeartbeatResponse.parser();
+        PartitionHeartbeatResponse buf = null;
+        try {
+            buf = parser.parseFrom(item.getItemContent());
+        } catch (InvalidProtocolBufferException t) {
+            log.error("Failed to parse queue-item to PartitionHeartbeatResponse, cause by:", t);
+        }
+        return buf;
+    }
+
+    public static void notifyError(int code, String message) {
+        subjectHolder.forEach((k, v) -> {
+            v.notifyError(code, message);
+        });
+    }
+
+    /**
+     * Add a notice listener; the notice comes from the pd-client.
+     *
+     * @param listener the listener invoked for each incoming notice
+     */
+    public static void listenPartitionHeartbeat(PulseListener<PartitionHeartbeatRequest> listener) {
+        subjectHolder.get(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name())
+                     .addListener(createListenerId(), listener);
+    }
+
+    private static Long createListenerId() {
+        // TODO: Maybe some other way...
+        return createObserverId();
+    }
+
+    private static Long createObserverId() {
+        return IdUtil.createMillisId();
+    }
+
+    /* inner classes below */
+
+    private static class PDPulseStreamObserver implements StreamObserver<PulseRequest> {
+
+        private final StreamObserver<PulseResponse> responseObserver;
+        private AbstractObserverSubject subject;
+        private Long observerId;
+
+        PDPulseStreamObserver(StreamObserver<PulseResponse> responseObserver) {
+            this.responseObserver = responseObserver;
+        }
+
+        private void cancelObserver() {
+
+            if (this.subject == null) {
+                this.responseObserver.onError(
+                        new Exception("Invoke cancel-observer before create-observer."));
+                return;
+            }
+
+            this.subject.removeObserver(this.observerId, this.responseObserver);
+        }
+
+        private void addObserver(PulseCreateRequest request) {
+            if (this.subject != null) {
+                return;
+            }
+
+            PulseType pulseType = getPulseType(request);
+            if (pulseType == null) {
+                return;
+            }
+
+            this.subject = getSubject(pulseType);
+            this.observerId = createObserverId();
+
+            this.subject.addObserver(this.observerId, this.responseObserver);
+        }
+
+        private void ackNotice(long noticeId, long observerId) {
+            // log.info("ack noticeId, noticeId: {}, observerId: {}, ts:{}",
+            // noticeId,observerId, System.currentTimeMillis());
+            broadcasterQueue.removeIf(e -> e.checkAck(noticeId));
+        }
+
+        private PulseType getPulseType(PulseCreateRequest request) {
+            PulseType pulseType = request.getPulseType();
+
+            if (pulseType.equals(PulseType.PULSE_TYPE_UNKNOWN)) {
+                this.responseObserver.onError(new Exception("unknown pulse type."));
+                return null;
+            }
+
+            return pulseType;
+        }
+
+        private AbstractObserverSubject getSubject(PulseType pulseType) {
+            AbstractObserverSubject subject = subjectHolder.get(pulseType.name());
+
+            if (subject == null) {
+                responseObserver.onError(
+                        new Exception("Unsupported pulse-type: " + pulseType.name()));
+                return null;
+            }
+
+            return subject;
+        }
+
+        private void handleNotice(PulseNoticeRequest noticeRequest) {
+            try {
+                subject.handleClientNotice(noticeRequest);
+            } catch (Exception e) {
+                if (e instanceof PDException) {
+                    var pde = (PDException) e;
+                    if (pde.getErrorCode() == NOT_LEADER.getNumber()) {
+                        try {
+                            log.info("send change leader command to watch, due to ERROR-100", pde);
+                            notifyClient(PdInstructionResponse.newBuilder()
+                                                              .setInstructionType(
+                                                                      PdInstructionType.CHANGE_TO_FOLLOWER)
+                                                              .setLeaderIp(RaftEngine.getInstance()
+                                                                                     .getLeaderGrpcAddress())
+                                                              .build());
+                        } catch (ExecutionException | InterruptedException ex) {
+                            log.error("send notice to observer failed, ", ex);
+                        }
+                    }
+                } else {
+                    log.error("handleNotice error", e);
+                }
+            }
+        }
+
+        @Override
+        public void onNext(PulseRequest pulseRequest) {
+
+            if (pulseRequest.hasCreateRequest()) {
+                this.addObserver(pulseRequest.getCreateRequest());
+                return;
+            }
+
+            if (pulseRequest.hasCancelRequest()) {
+                this.cancelObserver();
+                return;
+            }
+
+            if (pulseRequest.hasNoticeRequest()) {
+                this.handleNotice(pulseRequest.getNoticeRequest());
+            }
+
+            if (pulseRequest.hasAckRequest()) {
+                this.ackNotice(pulseRequest.getAckRequest().getNoticeId()
+                        , pulseRequest.getAckRequest().getObserverId());
+            }
+        }
+
+        @Override
+        public void onError(Throwable throwable) {
+            this.cancelObserver();
+        }
+
+        @Override
+        public void onCompleted() {
+            this.cancelObserver();
+        }
+
+    }
+
+}
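
Note: PDPulseSubject keeps every broadcast in broadcasterQueue, persists it through the
injected durable function, and re-sends it every RETRYING_PERIOD_SECONDS until a client
acks the notice-id or it expires after NOTICE_EXPIRATION_TIME. A minimal sketch of wiring
the three queue hooks and broadcasting (the in-memory map is an assumption for
illustration; the setters and the notifyClient overload are the ones defined above):

    // hypothetical in-memory stand-in for PD's real metadata store
    Map<String, Metapb.QueueItem> store = new ConcurrentHashMap<>();

    PDPulseSubject.setQueueDurableFunction(item -> {
        store.put(item.getItemId(), item);          // persist the notice
        return true;
    });
    PDPulseSubject.setQueueRetrieveFunction(
            () -> new ArrayList<>(store.values())); // replayed by doSchedule()
    PDPulseSubject.setQueueRemoveFunction(id -> store.remove(id) != null);

    // queued, persisted, and retried until acked or expired
    PDPulseSubject.notifyClient(PartitionHeartbeatResponse.newBuilder());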
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java
new file mode 100644
index 0000000..1824adc
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import java.util.function.Function;
+
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+
+import com.google.protobuf.GeneratedMessageV3;
+
+public class PartitionHeartbeatSubject extends AbstractObserverSubject {
+
+    PartitionHeartbeatSubject() {
+        super(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT);
+    }
+
+    @Override
+    String toNoticeString(PulseResponse res) {
+        return res.getPartitionHeartbeatResponse().toString();
+    }
+
+    @Override
+    Function<PulseNoticeRequest, PartitionHeartbeatRequest> getNoticeHandler() {
+        return r -> r.getPartitionHeartbeatRequest();
+    }
+
+    void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) {
+        super.notifyClient(b -> {
+            b.setPartitionHeartbeatResponse(responseBuilder);
+        });
+    }
+
+    @Override
+    long notifyClient(GeneratedMessageV3 response) {
+        return super.notifyClient(b -> {
+            b.setPartitionHeartbeatResponse((PartitionHeartbeatResponse) response);
+        });
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java
new file mode 100644
index 0000000..e123384
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import java.util.function.Function;
+
+import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+
+import com.google.protobuf.GeneratedMessageV3;
+
+public class PdInstructionSubject extends AbstractObserverSubject {
+
+    protected PdInstructionSubject() {
+        super(PulseType.PULSE_TYPE_PD_INSTRUCTION);
+    }
+
+    @Override
+    String toNoticeString(PulseResponse res) {
+        return res.getInstructionResponse().toString();
+    }
+
+    /**
+     * Instructions that PD pushes to the pulse channel one-way; no corresponding
+     * notice is received from the client, so the handler yields null.
+     *
+     * @return a handler that always yields null
+     */
+    @Override
+    Function<PulseNoticeRequest, PdInstructionSubject> getNoticeHandler() {
+        return pulseNoticeRequest -> null;
+    }
+
+    @Override
+    long notifyClient(GeneratedMessageV3 response) {
+        return super.notifyClient(b -> {
+            b.setInstructionResponse((PdInstructionResponse) response);
+        });
+    }
+}
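
Note: the one-way instruction path is exercised in PDPulseSubject.handleNotice() above,
where a NOT_LEADER error triggers a change-to-follower push. The same call can be issued
directly:

    PDPulseSubject.notifyClient(
            PdInstructionResponse.newBuilder()
                                 .setInstructionType(PdInstructionType.CHANGE_TO_FOLLOWER)
                                 .setLeaderIp(RaftEngine.getInstance().getLeaderGrpcAddress())
                                 .build());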
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java
new file mode 100644
index 0000000..8a24725
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+public interface PulseListener<T> {
+
+    /**
+     * Invoked on new notice.
+     *
+     * @param notice the notice.
+     */
+    void onNext(T notice) throws Exception;
+
+    /**
+     * Invoked on errors.
+     *
+     * @param throwable the error.
+     */
+    void onError(Throwable throwable);
+
+    /**
+     * Invoked on completion.
+     */
+    void onCompleted();
+}
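
Usage sketch: a server-side component registers for store heartbeats through
PDPulseSubject.listenPartitionHeartbeat(), defined earlier in this patch; the handler
bodies below are hypothetical and assume an @Slf4j logger in scope:

    PDPulseSubject.listenPartitionHeartbeat(new PulseListener<PartitionHeartbeatRequest>() {
        @Override
        public void onNext(PartitionHeartbeatRequest notice) throws Exception {
            // e.g. forward the heartbeat to the partition service
            log.info("partition heartbeat notice: {}", notice);
        }

        @Override
        public void onError(Throwable throwable) {
            log.error("pulse listener error", throwable);
        }

        @Override
        public void onCompleted() {
            log.info("pulse listener completed");
        }
    });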
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java
new file mode 100644
index 0000000..d748d23
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.pd.common.PDException;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.MessageOrBuilder;
+import com.google.protobuf.util.JsonFormat;
+
+public class API {
+
+    // TODO: use a flexible way to define the version
+    // refer: https://github.com/apache/hugegraph/pull/2528#discussion_r1573823996
+    public static final String VERSION = "1.5.0";
+    public static final String PD = "PD";
+    public static final String STORE = "STORE";
+    public static final String STATUS_KEY = "status";
+    public static final String ERROR_KEY = "error";
+    public static final String QUOTATION = "\"";
+    public static final String COMMA = ",";
+    public static final String COLON = ": ";
+
+    public <T extends MessageOrBuilder> String toJSON(List<T> values, String key) {
+
+        StringBuilder builder = new StringBuilder();
+        builder.append("{")
+               .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0")
+               .append(COMMA)
+               .append(QUOTATION).append(key).append(QUOTATION).append(COLON)
+               .append("[ ");
+
+        if (values != null) {
+            values.forEach(s -> {
+                try {
+                    builder.append(JsonFormat.printer().print(s));
+                } catch (InvalidProtocolBufferException e) {
+                    e.printStackTrace();
+                }
+                builder.append(",");
+            });
+            builder.deleteCharAt(builder.length() - 1);
+        }
+        builder.append("]}");
+        return builder.toString();
+    }
+
+    public String toJSON(MessageOrBuilder value, String key) {
+        StringBuilder builder = new StringBuilder();
+        builder.append("{")
+               .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0")
+               .append(COMMA)
+               .append(QUOTATION).append(key).append(QUOTATION).append(COLON);
+        try {
+            if (value != null) {
+                builder.append(JsonFormat.printer().print(value));
+            } else {
+                builder.append("{}");
+            }
+            builder.append("}");
+            return builder.toString();
+        } catch (InvalidProtocolBufferException e) {
+            e.printStackTrace();
+            return toJSON(e);
+        }
+
+    }
+
+    public <T extends MessageOrBuilder> String toJSON(Map<String, List<T>> values) {
+        StringBuilder builder = new StringBuilder();
+        builder.append("{ ");
+        for (Map.Entry<String, List<T>> entry : values.entrySet()) {
+            String entryKey = entry.getKey();
+            List<T> entryValue = entry.getValue();
+            builder.append(QUOTATION).append(entryKey).append(QUOTATION).append(COLON).append("[");
+            if ((entryValue != null) && !(entryValue.isEmpty())) {
+                entryValue.forEach(s -> {
+                    try {
+                        if (s == null) {
+                            builder.append("null");
+                        } else {
+                            builder.append(JsonFormat.printer().print(s));
+                        }
+                    } catch (InvalidProtocolBufferException e) {
+                        e.printStackTrace();
+                    }
+                    builder.append(",");
+                });
+                builder.deleteCharAt(builder.length() - 1); // remove the trailing comma
+            }
+            builder.append("]").append(COMMA);
+        }
+        builder.deleteCharAt(builder.length() - 1);
+        builder.append("}");
+        return builder.toString();
+    }
+
+    public String toJSON(PDException exception) {
+        String builder = "{" +
+                         QUOTATION + STATUS_KEY + QUOTATION + COLON +
+                         exception.getErrorCode() + COMMA +
+                         QUOTATION + ERROR_KEY + QUOTATION + COLON +
+                         QUOTATION + exception.getMessage() + QUOTATION +
+                         "}";
+
+        return builder;
+    }
+
+    public String toJSON(Exception exception) {
+        String builder = "{" +
+                         QUOTATION + STATUS_KEY + QUOTATION + COLON + "-1" +
+                         COMMA +
+                         QUOTATION + ERROR_KEY + QUOTATION + COLON +
+                         QUOTATION + exception.getMessage() + QUOTATION +
+                         "}";
+
+        return builder;
+    }
+
+    public String toJSON(Object object) {
+        ObjectMapper mapper = new ObjectMapper();
+        try {
+            return mapper.writeValueAsString(object);
+        } catch (JsonProcessingException e) {
+            e.printStackTrace();
+            return e.getMessage();
+        }
+    }
+
+    public Map<String, Object> okMap(String k, Object v) {
+        Map<String, Object> map = new HashMap<>();
+        map.put(STATUS_KEY, 0);
+        map.put(k, v);
+        return map;
+    }
+
+    public <T extends MessageOrBuilder> String toJSON(List<T> values,
+                                                      JsonFormat.TypeRegistry registry) {
+
+        StringBuilder builder = new StringBuilder();
+        builder.append("{")
+               .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0")
+               .append(COMMA)
+               .append(QUOTATION).append("log").append(QUOTATION).append(COLON)
+               .append("[ ");
+        JsonFormat.Printer printer = JsonFormat.printer().usingTypeRegistry(registry);
+        if (values != null) {
+            values.forEach(s -> {
+                try {
+                    builder.append(printer.print(s));
+                } catch (InvalidProtocolBufferException e) {
+                    e.printStackTrace();
+                }
+                builder.append(",");
+            });
+            builder.deleteCharAt(builder.length() - 1);
+        }
+        builder.append("]}");
+        return builder.toString();
+    }
+
+}
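
Note: these helpers build the response envelope by string concatenation rather than with a
JSON library, so the shape is fixed by the constants above. For example:

    API api = new API();
    String json = api.toJSON(new Exception("boom"));
    // json == {"status": -1,"error": "boom"}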
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java
new file mode 100644
index 0000000..68d80be
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.net.URLDecoder;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.GraphRestRequest;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class GraphAPI extends API {
+
+    @Autowired
+    PDRestService pdRestService;
+    @Autowired
+    PDService pdService;
+
+    @GetMapping(value = "/graph/partitionSizeRange", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getPartitionSizeRange() {
+        try {
+            int minPartitionSize = 1;
+            int maxPartitionSize = pdService.getStoreNodeService().getShardGroups().size();
+            Map<String, Integer> dataMap = new HashMap<>();
+            dataMap.put("minPartitionSize", minPartitionSize);
+            dataMap.put("maxPartitionSize", maxPartitionSize);
+            return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error("PDException:", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @GetMapping(value = "/graphs", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getGraphs() {
+        RestApiResponse response = new RestApiResponse();
+        try {
+            List<Metapb.Graph> graphs = pdRestService.getGraphs();
+            List<GraphStatistics> resultGraphs = new ArrayList<>();
+            for (Metapb.Graph graph : graphs) {
+                if ((graph.getGraphName() != null) && (graph.getGraphName().endsWith("/g"))) {
+                    resultGraphs.add(new GraphStatistics(graph));
+                }
+            }
+            HashMap<String, Object> dataMap = new HashMap<>();
+            dataMap.put("graphs", resultGraphs);
+            response.setData(dataMap);
+            response.setStatus(Pdpb.ErrorType.OK.getNumber());
+            response.setMessage(Pdpb.ErrorType.OK.name());
+
+        } catch (PDException e) {
+            log.error("PDException: ", e);
+            response.setData(new HashMap<String, Object>());
+            response.setStatus(e.getErrorCode());
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
+
+    @PostMapping(value = "/graph/**", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String setGraph(@RequestBody GraphRestRequest body, HttpServletRequest request) {
+        try {
+            String requestURL = request.getRequestURL().toString();
+            final String prefix = "/graph/";
+            final int limit = 2;
+            String graphName = requestURL.split(prefix, limit)[1];
+            graphName = URLDecoder.decode(graphName, StandardCharsets.UTF_8);
+            Metapb.Graph curGraph = pdRestService.getGraph(graphName);
+            Metapb.Graph.Builder builder = Metapb.Graph.newBuilder(
+                    curGraph == null ? Metapb.Graph.getDefaultInstance() : curGraph);
+            builder.setGraphName(graphName);
+            if (body.getPartitionCount() > 0) {
+                builder.setPartitionCount(body.getPartitionCount());
+            }
+
+            Metapb.Graph newGraph = pdRestService.updateGraph(builder.build());
+            return toJSON(newGraph, "graph");
+        } catch (PDException exception) {
+            return toJSON(exception);
+        } catch (Exception e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/graph/**", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getGraph(HttpServletRequest request) {
+        RestApiResponse response = new RestApiResponse();
+        GraphStatistics statistics = null;
+        String requestURL = request.getRequestURL().toString();
+        final String prefix = "/graph/";
+        final int limit = 2;
+        String graphName = requestURL.split(prefix, limit)[1];
+        graphName = URLDecoder.decode(graphName, StandardCharsets.UTF_8);
+        try {
+            Metapb.Graph graph = pdRestService.getGraph(graphName);
+            if (graph != null) {
+                statistics = new GraphStatistics(graph);
+                response.setData(statistics);
+            } else {
+                response.setData(new HashMap<String, Object>()); // the graph does not exist
+            }
+            response.setStatus(Pdpb.ErrorType.OK.getNumber());
+            response.setMessage(Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error(e.getMessage());
+            response.setData(new HashMap<String, Object>());
+            response.setStatus(Pdpb.ErrorType.UNKNOWN.getNumber());
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
+
+    @Data
+    class Shard {
+
+        long partitionId;
+        long storeId;
+        String state;
+        String role;
+        int progress;
+
+        public Shard(Metapb.ShardStats shardStats, long partitionId) {
+            this.role = String.valueOf(shardStats.getRole());
+            this.storeId = shardStats.getStoreId();
+            this.state = String.valueOf(shardStats.getState());
+            this.partitionId = partitionId;
+            this.progress = shardStats.getProgress();
+        }
+
+        public Shard(Metapb.Shard shard, long partitionId) {
+            this.role = String.valueOf(shard.getRole());
+            this.storeId = shard.getStoreId();
+            this.state = Metapb.ShardState.SState_Normal.name(); // the shard state defaults to normal
+            this.progress = 0;
+            this.partitionId = partitionId;
+        }
+
+    }
+
+    @Data
+    class Partition {
+
+        int partitionId;
+        String graphName;
+        String workState;
+        long startKey;
+        long endKey;
+        List<Shard> shards;
+        long dataSize;
+
+        public Partition(Metapb.Partition pt, Metapb.PartitionStats partitionStats) {
+            if (pt != null) {
+                partitionId = pt.getId();
+                startKey = pt.getStartKey();
+                endKey = pt.getEndKey();
+                workState = String.valueOf(pt.getState());
+                graphName = pt.getGraphName();
+                final int postfixLength = 2;
+                graphName = graphName.substring(0, graphName.length() - postfixLength);
+                if (partitionStats != null) {
+                    List<Metapb.ShardStats> shardStatsList = partitionStats.getShardStatsList();
+                    List<Shard> shardsList = new ArrayList<>();
+                    for (Metapb.ShardStats shardStats : shardStatsList) {
+                        Shard shard = new Shard(shardStats, partitionId);
+                        shardsList.add(shard);
+                    }
+                    this.shards = shardsList;
+                } else {
+                    List<Shard> shardsList = new ArrayList<>();
+                    try {
+                        var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId());
+                        if (shardGroup != null) {
+                            for (Metapb.Shard shard1 : shardGroup.getShardsList()) {
+                                shardsList.add(new Shard(shard1, partitionId));
+                            }
+                        } else {
+                            log.error("GraphAPI.Partition(), get shard group: {} returns null",
+                                      pt.getId());
+                        }
+                    } catch (PDException e) {
+                        log.error("Partition init failed, error: {}", e.getMessage());
+                    }
+                    this.shards = shardsList;
+                }
+
+            }
+        }
+    }
+
+    @Data
+    class GraphStatistics {
+
+        // graph statistics
+        String graphName;
+        long partitionCount;
+        String state;
+        List<Partition> partitions;
+        long dataSize;
+        //todo
+        int nodeCount;
+        int edgeCount;
+        long keyCount;
+
+        public GraphStatistics(Metapb.Graph graph) throws PDException {
+            if (graph == null) {
+                return;
+            }
+            Map<Integer, Long> partition2DataSize = new HashMap<>();
+            graphName = graph.getGraphName();
+            partitionCount = graph.getPartitionCount();
+            state = String.valueOf(graph.getState());
+            // data size and key count
+            List<Metapb.Store> stores = pdRestService.getStores(graphName);
+            for (Metapb.Store store : stores) {
+                List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+                for (Metapb.GraphStats graphStats : graphStatsList) {
+                    if ((graphName.equals(graphStats.getGraphName()))
+                        && (Metapb.ShardRole.Leader.equals(graphStats.getRole()))) {
+                        keyCount += graphStats.getApproximateKeys();
+                        dataSize += graphStats.getApproximateSize();
+                        partition2DataSize.put(graphStats.getPartitionId(),
+                                               graphStats.getApproximateSize());
+                    }
+                }
+            }
+            List<Partition> resultPartitionList = new ArrayList<>();
+            List<Metapb.Partition> tmpPartitions = pdRestService.getPartitions(graphName);
+            if ((tmpPartitions != null) && (!tmpPartitions.isEmpty())) {
+                // partition info to be returned
+                for (Metapb.Partition partition : tmpPartitions) {
+                    Metapb.PartitionStats partitionStats = pdRestService
+                            .getPartitionStats(graphName, partition.getId());
+                    Partition pt = new Partition(partition, partitionStats);
+                    pt.dataSize = partition2DataSize.getOrDefault(partition.getId(), 0L);
+                    resultPartitionList.add(pt);
+                }
+            }
+            partitions = resultPartitionList;
+            // strip the trailing /g /m /s from the graph name
+            final int postfixLength = 2;
+            graphName = graphName.substring(0, graphName.length() - postfixLength);
+        }
+    }
+}
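
Note: the controller above mounts under /v1. A client-side sketch using java.net.http
(JDK 11+); the host and port are assumptions, since PD's REST port is
configuration-dependent:

    var client = java.net.http.HttpClient.newHttpClient();
    var request = java.net.http.HttpRequest.newBuilder()
            .uri(java.net.URI.create("http://127.0.0.1:8620/v1/graphs"))
            .GET()
            .build();
    var response = client.send(request,
            java.net.http.HttpResponse.BodyHandlers.ofString());
    System.out.println(response.body()); // RestApiResponse JSON envelope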
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java
new file mode 100644
index 0000000..388f842
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.net.URLDecoder;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.model.GraphSpaceRestRequest;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class GraphSpaceAPI extends API {
+
+    @Autowired
+    PDRestService pdRestService;
+
+    @GetMapping(value = "/graph-spaces", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getGraphSpaces() {
+        try {
+            List<Metapb.GraphSpace> graphSpaces = pdRestService.getGraphSpaces();
+            return toJSON(graphSpaces, "graph-spaces");
+        } catch (PDException e) {
+            log.error("Failed to get graph spaces: ", e);
+            return toJSON(e);
+        }
+    }
+
+    @PostMapping(value = "/graph-spaces/**", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String setGraphSpace(@RequestBody GraphSpaceRestRequest body,
+                                HttpServletRequest request) {
+        try {
+            String requestURL = request.getRequestURL().toString();
+            String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1];
+            graphSpaceName = URLDecoder.decode(graphSpaceName, StandardCharsets.UTF_8);
+            Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder()
+                                                            .setName(graphSpaceName)
+                                                            .setStorageLimit(body.getStorageLimit())
+                                                            .build();
+            Metapb.GraphSpace newGraphSpace = pdRestService.setGraphSpace(graphSpace);
+            return toJSON(newGraphSpace, "graph-spaces");
+        } catch (PDException exception) {
+            return toJSON(exception);
+        } catch (Exception e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/graph-spaces/**", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getGraphSpace(HttpServletRequest request) {
+        try {
+            String requestURL = request.getRequestURL().toString();
+            String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1];
+            graphSpaceName = URLDecoder.decode(graphSpaceName, StandardCharsets.UTF_8);
+            Metapb.GraphSpace graphSpace = pdRestService.getGraphSpace(graphSpaceName);
+            return toJSON(graphSpace, "graphs-paces");
+        } catch (PDException exception) {
+            return toJSON(exception);
+        } catch (Exception e) {
+            return toJSON(e);
+        }
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java
new file mode 100644
index 0000000..89f6e86
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/")
+public class IndexAPI extends API {
+
+    @Autowired
+    PDService pdService;
+    @Autowired
+    PDRestService pdRestService;
+
+    @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public BriefStatistics index() throws PDException, ExecutionException, InterruptedException {
+
+        BriefStatistics statistics = new BriefStatistics();
+        statistics.leader = RaftEngine.getInstance().getLeaderGrpcAddress();
+        statistics.state = pdService.getStoreNodeService().getClusterStats().getState().toString();
+        statistics.storeSize = pdService.getStoreNodeService().getActiveStores().size();
+        statistics.graphSize = pdService.getPartitionService().getGraphs().size();
+        statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size();
+        return statistics;
+
+    }
+
+    @GetMapping(value = "/v1/cluster", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse cluster() throws InterruptedException, ExecutionException {
+        Statistics statistics = new Statistics();
+        try {
+            statistics.state =
+                    String.valueOf(pdService.getStoreNodeService().getClusterStats().getState());
+            String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress();
+            CallStreamObserverWrap<Pdpb.GetMembersResponse> response =
+                    new CallStreamObserverWrap<>();
+            pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response);
+            List<Member> pdList = new ArrayList<>();
+            for (Metapb.Member member : response.get().get(0).getMembersList()) {
+                Member member1 = new Member(member);
+                if ((leaderGrpcAddress != null) &&
+                    (leaderGrpcAddress.equals(member.getGrpcUrl()))) {
+                    member1.role = "Leader";
+                    statistics.pdLeader = member1;
+                } else {
+                    member1.role = "Follower";
+                }
+                pdList.add(member1);
+            }
+            statistics.pdList = pdList;
+            statistics.memberSize = pdList.size();
+            List<Store> stores = new ArrayList<>();
+            for (Metapb.Store store : pdService.getStoreNodeService().getStores()) {
+                stores.add(new Store(store));
+            }
+            statistics.stores = stores;
+            statistics.storeSize = statistics.stores.size();
+            statistics.onlineStoreSize = pdService.getStoreNodeService().getActiveStores().size();
+            statistics.offlineStoreSize = statistics.storeSize - statistics.onlineStoreSize;
+            List<Metapb.Graph> graphs = pdRestService.getGraphs();
+            // number of graphs, counting only those ending with "/g"
+            statistics.graphSize = graphs.stream().filter((g) -> (g.getGraphName() != null)
+                                                                 &&
+                                                                 (g.getGraphName().endsWith("/g")))
+                                         .count();
+            statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size();
+            statistics.shardCount = pdService.getConfigService().getPDConfig().getShardCount();
+            for (Metapb.Store store : pdService.getStoreNodeService().getStores()) {
+                List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+                for (Metapb.GraphStats graphStats : graphStatsList) {
+                    statistics.keyCount += graphStats.getApproximateKeys();
+                    statistics.dataSize += graphStats.getApproximateSize();
+                }
+            }
+            // Data state: derived from the graph states; a larger enum value means a more severe problem; defaults to normal
+            Metapb.PartitionState dataState = Metapb.PartitionState.PState_Normal;
+            for (Metapb.Graph graph : pdRestService.getGraphs()) {
+                if (graph.getState() == Metapb.PartitionState.UNRECOGNIZED) {
+                    continue; // UNRECOGNIZED is skipped, otherwise getNumber() would throw
+                }
+                if ((graph.getState() != null) &&
+                    (graph.getState().getNumber() > dataState.getNumber())) {
+                    dataState = graph.getState();
+                }
+            }
+            statistics.dataState = dataState.name();
+            return new RestApiResponse(statistics, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error("PD Exception: ", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @Data
+    class BriefStatistics {
+
+        String state;
+        String leader;
+        int memberSize;
+        int storeSize;
+        int graphSize;
+        int partitionSize;
+    }
+
+    @Data
+    class Store {
+
+        long storeId;
+        String address;
+        String raftAddress;
+        String version;
+        String state;
+        long startTimeStamp;
+
+        public Store(Metapb.Store store) {
+            if (store != null) {
+                storeId = store.getId();
+                address = store.getAddress();
+                raftAddress = store.getRaftAddress();
+                version = store.getVersion();
+                state = String.valueOf(store.getState());
+                startTimeStamp = store.getStartTimestamp();
+            }
+
+        }
+    }
+
+    @Data
+    class Member {
+
+        String raftUrl;
+        String grpcUrl;
+        String restUrl;
+        String state;
+        String dataPath;
+        String role;
+        String serviceName; // service name, a custom attribute
+        String serviceVersion; // statically defined
+        long startTimeStamp; // process start time
+
+        public Member(Metapb.Member member) {
+            if (member != null) {
+                raftUrl = member.getRaftUrl();
+                grpcUrl = member.getGrpcUrl();
+                restUrl = member.getRestUrl();
+                state = String.valueOf(member.getState());
+                dataPath = member.getDataPath();
+                serviceName = grpcUrl + "-PD";
+                serviceVersion = VERSION;
+                startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime();
+            }
+        }
+
+        public Member() {
+
+        }
+    }
+
+    @Data
+    class Statistics {
+
+        /**
+         * Cluster state
+         */
+        String state;
+        /**
+         * Data state
+         */
+        String dataState;
+        /**
+         * PD cluster members
+         */
+        List<Member> pdList;
+        /**
+         * Leader of the PD cluster
+         */
+        Member pdLeader;
+        /**
+         * Size of the PD cluster
+         */
+        int memberSize;
+        /**
+         * Store list
+         */
+        List<Store> stores;
+        /**
+         * Number of stores
+         */
+        int storeSize;
+        /**
+         * Number of online stores
+         */
+        int onlineStoreSize;
+        /**
+         * Number of offline stores
+         */
+        int offlineStoreSize;
+        /**
+         * Number of graphs
+         */
+        long graphSize;
+        /**
+         * Number of partitions
+         */
+        int partitionSize;
+        /**
+         * Number of shard replicas per partition
+         */
+        int shardCount;
+        /**
+         * Number of keys
+         */
+        long keyCount;
+        /**
+         * Data size
+         */
+        long dataSize;
+
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java
new file mode 100644
index 0000000..d0078b5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.PeerRestRequest;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import io.grpc.stub.CallStreamObserver;
+import io.grpc.stub.StreamObserver;
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class MemberAPI extends API {
+
+    //TODO
+    @Autowired
+    PDService pdService;
+
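+    /**
+     * Returns PD cluster membership: every member, the raft leader, and a
+     * per-state member count.
+     */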
+    @GetMapping(value = "/members", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getMembers() throws InterruptedException, ExecutionException {
+
+        String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress();
+        CallStreamObserverWrap<Pdpb.GetMembersResponse> response = new CallStreamObserverWrap<>();
+        pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response);
+        List<Member> members = new ArrayList<>();
+        Member leader = null;
+        Map<String, Integer> stateCountMap = new HashMap<>();
+        for (Metapb.Member member : response.get().get(0).getMembersList()) {
+            String stateKey = member.getState().name();
+            stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1);
+            Member memberInfo = new Member(member);
+            if ((leaderGrpcAddress != null) && (leaderGrpcAddress.equals(member.getGrpcUrl()))) {
+                leader = memberInfo;
+            }
+            memberInfo.role = member.getRole().name();
+            members.add(memberInfo);
+        }
+        String state = pdService.getStoreNodeService().getClusterStats().getState().toString();
+        HashMap<String, Object> resultMap = new HashMap<>();
+        resultMap.put("state", state);
+        resultMap.put("pdList", members);
+        resultMap.put("pdLeader", leader);
+        resultMap.put("numOfService", members.size());
+        resultMap.put("numOfNormalService",
+                      stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0));
+        resultMap.put("stateCountMap", stateCountMap);
+        return new RestApiResponse(resultMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+    }
+
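+    /**
+     * Changes the raft peer list of the PD cluster. A hypothetical invocation
+     * (host/port and the comma-separated peerList format are assumptions, not
+     * confirmed by this patch):
+     * curl -X POST http://127.0.0.1:8620/v1/members/change \
+     *      -H "Content-Type: application/json" \
+     *      -d '{"peerList": "127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612"}'
+     */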
+    @PostMapping(value = "/members/change", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String changePeerList(@RequestBody PeerRestRequest body, HttpServletRequest request) {
+        try {
+            Pdpb.ChangePeerListRequest rpcRequest =
+                    Pdpb.ChangePeerListRequest.newBuilder().setPeerList(
+                            body.getPeerList()).build();
+            CountDownLatch latch = new CountDownLatch(1);
+            final Pdpb.ResponseHeader[] responseHeader = {null};
+            StreamObserver<Pdpb.getChangePeerListResponse> observer =
+                    new StreamObserver<Pdpb.getChangePeerListResponse>() {
+                        @Override
+                        public void onNext(Pdpb.getChangePeerListResponse value) {
+                            responseHeader[0] = value.getHeader();
+                        }
+
+                        @Override
+                        public void onError(Throwable t) {
+                            responseHeader[0] = Pdpb.ResponseHeader.newBuilder().setError(
+                                    Pdpb.Error.newBuilder().setType(
+                                            Pdpb.ErrorType.UNKNOWN).setMessage(
+                                            t.getMessage()).build()).build();
+                            latch.countDown();
+                        }
+
+                        @Override
+                        public void onCompleted() {
+                            latch.countDown();
+                        }
+                    };
+            pdService.changePeerList(rpcRequest, observer);
+            latch.await();
+            return toJSON(responseHeader[0], "changeResult");
+        } catch (Exception e) {
+            return toJSON(e);
+        }
+    }
+
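+    /**
+     * Adapts a gRPC CallStreamObserver into a Future so the blocking REST layer
+     * can wait synchronously on the async gRPC service methods: onNext() buffers
+     * each value and onCompleted() resolves the future with all buffered values.
+     */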
+    public static class CallStreamObserverWrap<V> extends CallStreamObserver<V> implements
+                                                                                Future<List<V>> {
+
+        CompletableFuture<List<V>> future = new CompletableFuture<>();
+        List<V> values = new ArrayList<>();
+
+        @Override
+        public boolean isReady() {
+            return false;
+        }
+
+        @Override
+        public void setOnReadyHandler(Runnable runnable) {
+
+        }
+
+        @Override
+        public void disableAutoInboundFlowControl() {
+
+        }
+
+        @Override
+        public void request(int i) {
+
+        }
+
+        @Override
+        public void setMessageCompression(boolean b) {
+
+        }
+
+        @Override
+        public void onNext(V v) {
+            values.add(v);
+        }
+
+        @Override
+        public void onError(Throwable throwable) {
+            future.completeExceptionally(throwable);
+        }
+
+        @Override
+        public void onCompleted() {
+            future.complete(values);
+        }
+
+        @Override
+        public boolean cancel(boolean mayInterruptIfRunning) {
+            return future.cancel(mayInterruptIfRunning);
+        }
+
+        @Override
+        public boolean isCancelled() {
+            return future.isCancelled();
+        }
+
+        @Override
+        public boolean isDone() {
+            return future.isDone();
+        }
+
+        @Override
+        public List<V> get() throws InterruptedException, ExecutionException {
+            return future.get();
+        }
+
+        @Override
+        public List<V> get(long timeout, TimeUnit unit) throws InterruptedException,
+                                                               ExecutionException,
+                                                               TimeoutException {
+            return future.get(timeout, unit);
+        }
+    }
+
+    @Data
+    class Member {
+
+        String raftUrl;
+        String grpcUrl;
+        String restUrl;
+        String state;
+        String dataPath;
+        String role;
+        String replicateState;
+        String serviceName; // service name, custom attribute
+        String serviceVersion; // statically defined
+        long startTimeStamp; // start time; temporarily the process start time
+
+        public Member(Metapb.Member member) {
+            if (member != null) {
+                raftUrl = member.getRaftUrl();
+                grpcUrl = member.getGrpcUrl();
+                restUrl = member.getRestUrl();
+                state = String.valueOf(member.getState());
+                dataPath = member.getDataPath();
+                serviceName = grpcUrl + "-PD";
+                serviceVersion = VERSION;
+                startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime();
+                replicateState = member.getReplicatorState();
+            }
+
+        }
+
+        @Useless("delete later")
+        public Member() {}
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java
new file mode 100644
index 0000000..bdbdec3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java
@@ -0,0 +1,478 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.commons.lang.time.DateFormatUtils;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.model.TimeRangeRequest;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.util.DateUtil;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import com.google.protobuf.util.JsonFormat;
+
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class PartitionAPI extends API {
+
+    public static final String DEFAULT_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss";
+    @Autowired
+    PDRestService pdRestService;
+
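+    /**
+     * Aggregates a per-partition overview: keyCount/dataSize are collected from
+     * the leader replica on each store, per-graph stats are attached to their
+     * partitions, and the result is sorted by partition id.
+     */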
+    @GetMapping(value = "/highLevelPartitions", produces = MediaType.APPLICATION_JSON_VALUE)
+    public RestApiResponse getHighLevelPartitions() {
+        // per-graph info within each partition
+        Map<Integer, Map<String, GraphStats>> partitions2GraphsMap = new HashMap<>();
+        Map<Integer, HighLevelPartition> resultPartitionsMap = new HashMap<>();
+        // keyCount of each partition, taken from the leader replica only
+        Map<Integer, Long> partition2KeyCount = new HashMap<>();
+        // dataSize of each partition, taken from the leader replica only
+        Map<Integer, Long> partition2DataSize = new HashMap<>();
+        List<Metapb.Store> stores;
+        Map<Long, Metapb.Store> storesMap = new HashMap<>();
+        try {
+            stores = pdRestService.getStores("");
+        } catch (PDException e) {
+            log.error("getStores error", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+        for (Metapb.Store store : stores) {
+            storesMap.put(store.getId(), store);
+            List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+            for (Metapb.GraphStats graphStats : graphStatsList) {
+                // collect per-graph info of the partition (leader replica only)
+                if (Metapb.ShardRole.Leader != graphStats.getRole()) {
+                    continue;
+                }
+                // keyCount of the partition (graph-agnostic): keep the first
+                // leader-reported value for each partition
+                partition2KeyCount.putIfAbsent(graphStats.getPartitionId(),
+                                               graphStats.getApproximateKeys());
+                // dataSize of the partition: accumulate the sizes of all graphs
+                partition2DataSize.merge(graphStats.getPartitionId(),
+                                         graphStats.getApproximateSize(), Long::sum);
+                // build the per-graph info under the partition
+                Map<String, GraphStats> partitionGraphsMap = partitions2GraphsMap
+                        .computeIfAbsent(graphStats.getPartitionId(), k -> new HashMap<>());
+                partitionGraphsMap.put(graphStats.getGraphName(), new GraphStats(graphStats));
+            }
+        }
+        // assemble all partition info to return
+        List<Metapb.Partition> partitionList = pdRestService.getPartitions("");
+        for (Metapb.Partition partition : partitionList) {
+            // fill in startKey/endKey of the graph info within the partition
+            if (partitions2GraphsMap.get(partition.getId()) != null) {
+                GraphStats graphStats =
+                        partitions2GraphsMap.get(partition.getId()).get(partition.getGraphName());
+                if (graphStats != null) {
+                    graphStats.startKey = partition.getStartKey();
+                    graphStats.endKey = partition.getEndKey();
+                }
+            }
+            // build the partition-level info (graph-agnostic)
+            if ((resultPartitionsMap.get(partition.getId()) == null)
+                && (!partition.getGraphName().endsWith("/s"))
+            ) {
+                Metapb.PartitionStats partitionStats;
+                try {
+                    partitionStats = pdRestService.getPartitionStats(partition.getGraphName(),
+                                                                     partition.getId());
+                } catch (PDException e) {
+                    log.error("getPartitionStats error", e);
+                    partitionStats = null;
+                }
+                // initialize the partition info
+                HighLevelPartition resultPartition =
+                        new HighLevelPartition(partition, partitionStats);
+                resultPartition.keyCount =
+                        partition2KeyCount.getOrDefault(resultPartition.partitionId, 0L);
+                resultPartition.dataSize =
+                        partition2DataSize.getOrDefault(resultPartition.partitionId, 0L);
+                for (ShardStats shard : resultPartition.shards) {
+                    // set the address and partition id of each replica
+                    shard.address = storesMap.get(shard.storeId).getAddress();
+                    shard.partitionId = partition.getId();
+                }
+                if ((partitionStats != null) && (partitionStats.getLeader() != null)) {
+                    long storeId = partitionStats.getLeader().getStoreId(); // leader's storeId
+                    resultPartition.leaderAddress =
+                            storesMap.get(storeId).getAddress(); // leader's address
+                }
+                resultPartitionsMap.put(partition.getId(), resultPartition);
+            }
+        }
+        // build the graph list under each partition: keep only /g graphs, sorted by name
+        for (Map.Entry<Integer, HighLevelPartition> entry : resultPartitionsMap.entrySet()) {
+            Integer partitionId = entry.getKey();
+            HighLevelPartition currentPartition = resultPartitionsMap.get(partitionId);
+            Map<String, GraphStats> graphsMap = partitions2GraphsMap
+                    .getOrDefault(partitionId, new HashMap<>()); // avoid a NullPointerException below
+            ArrayList<GraphStats> graphsList = new ArrayList<>();
+            for (Map.Entry<String, GraphStats> graphEntry : graphsMap.entrySet()) {
+                if (!graphEntry.getKey().endsWith("/g")) {
+                    continue; // keep only /g graphs
+                }
+                GraphStats tmpGraph = graphEntry.getValue();
+                final int postfixLength = 2; // length of the "/g" suffix
+                tmpGraph.graphName = tmpGraph.graphName.substring(0, tmpGraph.graphName.length() -
+                                                                     postfixLength);
+                graphsList.add(tmpGraph);
+            }
+            graphsList.sort(Comparator.comparing(g -> g.graphName));
+            currentPartition.graphs = graphsList;
+        }
+        List<HighLevelPartition> resultPartitionList = new ArrayList<>();
+        if (!resultPartitionsMap.isEmpty()) {
+            ArrayList<Integer> partitionIds = new ArrayList<>(resultPartitionsMap.keySet());
+            partitionIds.sort(Comparator.naturalOrder());
+            for (Integer partitionId : partitionIds) {
+                resultPartitionList.add(resultPartitionsMap.get(partitionId));
+            }
+        }
+        HashMap<String, Object> dataMap = new HashMap<>();
+        dataMap.put("partitions", resultPartitionList);
+        return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+    }
+
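+    /**
+     * Returns detailed partition info. Raft stats are indexed per store by
+     * partition id and graph stats by a "graphName@partitionId" key, so each
+     * shard row can be enriched with its committed index, state and progress.
+     */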
+    @GetMapping(value = "/partitions", produces = MediaType.APPLICATION_JSON_VALUE)
+    public RestApiResponse getPartitions() {
+        try {
+            List<Partition> partitions = new ArrayList<>(); // partition objects to return
+            List<Metapb.Partition> partitionList = pdRestService.getPartitions("");
+            List<Metapb.Store> stores = pdRestService.getStoreStats(false);
+            // raft state of each partition, indexed by storeId -> partitionId
+            HashMap<Long, HashMap<Integer, Metapb.RaftStats>> raftMap = new HashMap<>();
+
+            // graph stats indexed by storeId -> "graphName@partitionId"
+            HashMap<Long, HashMap<String, Metapb.GraphStats>> shardIndexMap = new HashMap<>();
+            String delimiter = "@";
+            for (Metapb.Store store : stores) {
+                Metapb.StoreStats storeStats = store.getStats();
+                HashMap<Integer, Metapb.RaftStats> storeRaftStats = new HashMap<>();
+                for (Metapb.RaftStats raftStats : storeStats.getRaftStatsList()) {
+                    storeRaftStats.put(raftStats.getPartitionId(), raftStats);
+                }
+
+                HashMap<String, Metapb.GraphStats> partitionShardStats = new HashMap<>();
+                for (Metapb.GraphStats graphStats : storeStats.getGraphStatsList()) {
+                    String key = graphStats.getGraphName() + delimiter +
+                                 graphStats.getPartitionId();
+                    partitionShardStats.put(key, graphStats);
+                }
+                raftMap.put(store.getId(), storeRaftStats);
+                shardIndexMap.put(store.getId(), partitionShardStats);
+            }
+
+            for (Metapb.Partition pt : partitionList) {
+                Partition partition = new Partition(pt);
+                String graphName = partition.getGraphName();
+                partition.getShards().sort(Comparator.comparing(Shard::getStoreId));
+                Metapb.PartitionStats partitionStats =
+                        pdRestService.getPartitionStats(graphName, pt.getId());
+                Map<Long, Metapb.ShardStats> shardStats = new HashMap<>();
+                if (partitionStats != null) {
+                    String dateTime = DateFormatUtils.format(
+                            partitionStats.getTimestamp(), DEFAULT_DATETIME_FORMAT);
+                    partition.setTimestamp(dateTime);
+                    shardStats = getShardStats(partitionStats);
+                }
+
+                for (Metapb.Shard shard : pdRestService.getShardList(pt.getId())) {
+                    Shard shardInfo = new Shard();
+                    shardInfo.storeId = Long.toString(shard.getStoreId());
+                    shardInfo.role = shard.getRole();
+                    shardInfo.address = pdRestService.getStore(shard.getStoreId()).getAddress();
+                    Metapb.ShardStats stats = shardStats.get(shard.getStoreId());
+                    if (stats != null) {
+                        shardInfo.state = stats.getState().toString();
+                        shardInfo.progress = stats.getProgress();
+                        shardInfo.role = stats.getRole();
+                    }
+                    HashMap<Integer, Metapb.RaftStats> storeRaftStats =
+                            raftMap.get(shard.getStoreId());
+                    if (storeRaftStats != null) {
+                        Metapb.RaftStats raftStats = storeRaftStats.get(partition.getId());
+                        if (raftStats != null) {
+                            shardInfo.committedIndex =
+                                    Long.toString(raftStats.getCommittedIndex());
+                        }
+                    }
+                    partition.getShards().add(shardInfo);
+                }
+
+                partition.setPartitionStats(partitionStats);
+
+                partitions.add(partition);
+            }
+            partitions.sort(
+                    Comparator.comparing(Partition::getGraphName).thenComparing(Partition::getId));
+            HashMap<String, Object> dataMap = new HashMap<>();
+            dataMap.put("partitions", partitions);
+            return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error("query metric data error", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @GetMapping(value = "/partitionsAndStats", produces = MediaType.APPLICATION_JSON_VALUE)
+    public String getPartitionsAndStats() {
+        // for debugging: returns both partitions and partition stats
+        try {
+            Map<String, List<Metapb.Partition>> graph2Partitions = new HashMap<>();
+            Map<String, List<Metapb.PartitionStats>> graph2PartitionStats = new HashMap<>();
+            for (Metapb.Graph graph : pdRestService.getGraphs()) {
+                List<Metapb.Partition> partitionList = new ArrayList<>();
+                List<Metapb.PartitionStats> partitionStatsList = new ArrayList<>();
+                for (Metapb.Partition partition : pdRestService.getPartitions(
+                        graph.getGraphName())) {
+                    Metapb.PartitionStats partitionStats = pdRestService
+                            .getPartitionStats(graph.getGraphName(), partition.getId());
+                    partitionList.add(partition);
+                    partitionStatsList.add(partitionStats);
+                }
+                graph2Partitions.put(graph.getGraphName(), partitionList);
+                graph2PartitionStats.put(graph.getGraphName(), partitionStatsList);
+            }
+            String json = "{\"partitions\":" + toJSON(graph2Partitions) +
+                          ",\"partitionStats\":" + toJSON(graph2PartitionStats) + "}";
+            return json;
+        } catch (PDException e) {
+            log.error("PD exception:" + e);
+            return toJSON(e);
+        }
+    }
+
+    private Map<Long, Metapb.ShardStats> getShardStats(Metapb.PartitionStats partitionStats) {
+        Map<Long, Metapb.ShardStats> stats = new HashMap<>();
+        if (partitionStats.getShardStatsList() != null) {
+            partitionStats.getShardStatsList().forEach(shardStats -> {
+                stats.put(shardStats.getStoreId(), shardStats);
+            });
+        }
+        return stats;
+    }
+
+    @PostMapping(value = "/partitions/log", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getPartitionLog(@RequestBody TimeRangeRequest request) {
+        try {
+            Date dateStart = DateUtil.getDate(request.getStartTime());
+            Date dateEnd = DateUtil.getDate(request.getEndTime());
+            List<Metapb.LogRecord> changedRecords =
+                    pdRestService.getPartitionLog(dateStart.getTime(),
+                                                  dateEnd.getTime());
+            if (changedRecords != null) {
+                JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry
+                        .newBuilder().add(Pdpb.SplitDataRequest.getDescriptor()).build();
+                return toJSON(changedRecords, registry);
+            } else {
+                return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error"));
+            }
+        } catch (PDException e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public Statistics getStatistics() throws PDException, ExecutionException, InterruptedException {
+        // TODO: aggregate cluster statistics; currently returns an empty placeholder
+        return new Statistics();
+    }
+
+    @Data
+    class Shard {
+
+        String address;
+        String storeId;
+        Metapb.ShardRole role;
+        String state;
+        int progress;
+        String committedIndex;
+        long partitionId;
+
+    }
+
+    @Data
+    class Partition {
+
+        int id;
+        long version;
+        String graphName;
+        long startKey;
+        long endKey;
+
+        Metapb.PartitionState workState;
+        List<Shard> shards;
+        String timestamp;
+
+        Partition(Metapb.Partition pt) {
+            id = pt.getId();
+            version = pt.getVersion();
+            graphName = pt.getGraphName();
+            startKey = pt.getStartKey();
+            endKey = pt.getEndKey();
+            workState = pt.getState();
+            shards = new ArrayList<>();
+        }
+
+        public void setPartitionStats(Metapb.PartitionStats stats) {
+            // no-op: raw partition stats are not exposed in the response yet
+        }
+    }
+
+    @Data
+    class Statistics {
+
+    }
+
+    @Data
+    class HighLevelPartition {
+
+        int partitionId;
+        String state;
+        String leaderAddress;
+        long keyCount;
+        long dataSize;
+        String shardState;
+        int progress;
+        long raftTerm; // raft term
+        List<GraphStats> graphs;
+        List<ShardStats> shards;
+        String failureCause = "";
+
+        HighLevelPartition(Metapb.Partition partition, Metapb.PartitionStats partitionStats) {
+            partitionId = partition.getId();
+            state = String.valueOf(partition.getState());
+            if (partitionStats != null) {
+                raftTerm = partitionStats.getLeaderTerm();
+            }
+            Metapb.ShardState tmpShardState = Metapb.ShardState.SState_Normal;
+            if (partitionStats != null) {
+                shards = new ArrayList<>();
+                for (Metapb.ShardStats shardStats : partitionStats.getShardStatsList()) {
+                    if ((shardStats.getState() != Metapb.ShardState.UNRECOGNIZED)
+                        && (shardStats.getState().getNumber() > tmpShardState.getNumber())) {
+                        tmpShardState = shardStats.getState();
+                        progress = shardStats.getProgress();
+                    }
+                    shards.add(new ShardStats(shardStats));
+                }
+            } else {
+                shards = new ArrayList<>();
+                try {
+                    for (Metapb.Shard shard : pdRestService.getShardList(partition.getId())) {
+                        shards.add(new ShardStats(shard));
+                    }
+                } catch (PDException e) {
+                    log.error("get shard list failed, {}", e.getMessage());
+                }
+            }
+            // aggregate the states of all replicas into shardState
+            shardState = tmpShardState.name();
+        }
+    }
+
+    @Data
+    class GraphStats {
+
+        String graphName;
+        long keyCount;
+        long startKey;
+        long endKey;
+        long dataSize;
+        String workState;
+        long partitionId;
+
+        GraphStats(Metapb.GraphStats graphStats) {
+            graphName = graphStats.getGraphName();
+            keyCount = graphStats.getApproximateKeys();
+            workState = graphStats.getWorkState().toString();
+            dataSize = graphStats.getApproximateSize();
+            partitionId = graphStats.getPartitionId();
+        }
+    }
+
+    @Data
+    class ShardStats {
+
+        long storeId;
+        String role;
+        String state;
+        int progress;
+        // extra attributes
+        long partitionId;
+        String address;
+
+        ShardStats(Metapb.ShardStats shardStats) {
+            storeId = shardStats.getStoreId();
+            role = String.valueOf(shardStats.getRole());
+            state = shardStats.getState().toString();
+            progress = shardStats.getProgress();
+        }
+
+        ShardStats(Metapb.Shard shard) {
+            // constructor used when no shardStats is available
+            storeId = shard.getStoreId();
+            role = String.valueOf(shard.getRole());
+            state = Metapb.ShardState.SState_Normal.name();
+            progress = 0;
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java
new file mode 100644
index 0000000..9f16181
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+
+import org.apache.hugegraph.pd.model.PromTargetsModel;
+import org.apache.hugegraph.pd.service.PromTargetsService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * TODO: confirm whether this class & its methods are needed (apparently for Prometheus)
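+ * Prometheus HTTP SD consumes a JSON array of {"targets": [...], "labels": {...}}
+ * entries; PromTargetsModel is assumed to serialize to that shape.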
+ */
+@RestController
+@Slf4j
+@RequestMapping("/v1/prom")
+public class PromTargetsAPI {
+
+    @Autowired
+    private PromTargetsService service;
+
+    @GetMapping(value = "/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE)
+    public ResponseEntity<List<PromTargetsModel>> getPromTargets(@PathVariable(value = "appName",
+                                                                               required = true)
+                                                                 String appName) {
+        return ResponseEntity.of(Optional.ofNullable(this.service.getTargets(appName)));
+    }
+
+    @GetMapping(value = "/targets-all", produces = MediaType.APPLICATION_JSON_VALUE)
+    public ResponseEntity<List<PromTargetsModel>> getPromAllTargets() {
+        return ResponseEntity.of(Optional.ofNullable(this.service.getAllTargets()));
+    }
+
+    @GetMapping(value = "/demo/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE)
+    public List<PromTargetsModel> getDemoTargets(@PathVariable(value = "appName",
+                                                               required = true) String appName) {
+        // TODO: ensure the IP addr is correct & useful
+        PromTargetsModel model = null;
+        switch (appName) {
+            case "node":
+                model = PromTargetsModel.of()
+                                        .addTarget("10.14.139.26:8100")
+                                        .addTarget("10.14.139.27:8100")
+                                        .addTarget("10.14.139.28:8100")
+                                        .setMetricsPath("/metrics")
+                                        .setScheme("http");
+                break;
+            case "store":
+                model = PromTargetsModel.of()
+                                        .addTarget("172.20.94.98:8521")
+                                        .addTarget("172.20.94.98:8522")
+                                        .addTarget("172.20.94.98:8523")
+                                        .setMetricsPath("/actuator/prometheus")
+                                        .setScheme("http");
+                break;
+            case "pd":
+                model = PromTargetsModel.of()
+                                        .addTarget("172.20.94.98:8620")
+                                        .setMetricsPath("/actuator/prometheus");
+
+                break;
+            default:
+
+        }
+        return Collections.singletonList(model);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java
new file mode 100644
index 0000000..482eac4
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PDRuntimeException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.model.RegistryQueryRestRequest;
+import org.apache.hugegraph.pd.model.RegistryRestRequest;
+import org.apache.hugegraph.pd.model.RegistryRestResponse;
+import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class RegistryAPI extends API {
+
+    @Autowired
+    PDRestService pdRestService;
+    @Autowired
+    PDService pdService;
+
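+    /**
+     * Registers a service node for discovery. A hypothetical invocation (all
+     * values are placeholders; interval is presumably the heartbeat period in ms):
+     * curl -X POST http://127.0.0.1:8620/v1/registry \
+     *      -H "Content-Type: application/json" \
+     *      -d '{"appName": "hugegraph", "version": "1.0",
+     *           "address": "127.0.0.1:8080", "interval": "10000", "labels": {}}'
+     */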
+    @PostMapping(value = "/registry", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RegistryRestResponse register(@RequestBody RegistryRestRequest body,
+                                         HttpServletRequest request) {
+        RegistryRestResponse registryResponse = null;
+        try {
+            long interval = Long.parseLong(body.getInterval());
+            NodeInfo info = NodeInfo.newBuilder().setAppName(body.getAppName())
+                                    .setVersion(body.getVersion())
+                                    .setAddress(body.getAddress()).putAllLabels(body.getLabels())
+                                    .setInterval(interval).build();
+            registryResponse = pdRestService.register(info);
+        } catch (PDException e) {
+            registryResponse = new RegistryRestResponse();
+            registryResponse.setErrorType(Pdpb.ErrorType.UNRECOGNIZED);
+            registryResponse.setMessage(e.getMessage());
+        } catch (PDRuntimeException e) {
+            registryResponse = new RegistryRestResponse();
+            registryResponse.setErrorType(Pdpb.ErrorType.LICENSE_VERIFY_ERROR);
+            registryResponse.setMessage(e.getMessage());
+        }
+        return registryResponse;
+    }
+
+    @PostMapping(value = "/registryInfo", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RegistryRestResponse getInfo(@RequestBody RegistryQueryRestRequest body,
+                                        HttpServletRequest request) {
+        RegistryRestResponse response = new RegistryRestResponse();
+        try {
+            boolean labelNotEmpty = body.getLabels() != null && !body.getLabels().isEmpty();
+            Query query = Query.newBuilder()
+                               .setAppName(StringUtils.isEmpty(body.getAppName()) ? "" :
+                                           body.getAppName())
+                               .putAllLabels(labelNotEmpty ? body.getLabels() : new HashMap<>())
+                               .setVersion(StringUtils.isEmpty(body.getVersion()) ? "" :
+                                           body.getVersion())
+                               .build();
+            ArrayList<RegistryRestRequest> registryResponse = pdRestService.getNodeInfo(query);
+            response.setErrorType(Pdpb.ErrorType.OK);
+            response.setData(registryResponse);
+        } catch (Exception e) {
+            log.warn(e.getMessage());
+            response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED);
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
+
+    @GetMapping(value = "/allInfo", consumes = MediaType.APPLICATION_JSON_VALUE,
+                produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RegistryRestResponse allInfo(HttpServletRequest request) {
+        RegistryRestResponse response = new RegistryRestResponse();
+        try {
+            // 1. normally registered services
+            Query query =
+                    Query.newBuilder().setAppName("").putAllLabels(new HashMap<>()).setVersion("")
+                         .build();
+            ArrayList<RegistryRestRequest> registryResponse = pdRestService.getNodeInfo(query);
+            // 2. PD members
+            LinkedList<RegistryRestRequest> pdMembers = getMembers();
+            // 3. store members
+            List<Metapb.Store> stores = pdRestService.getStores("");
+            LinkedList<RegistryRestRequest> storeMembers = new LinkedList<>();
+            for (Metapb.Store store : stores) {
+                RegistryRestRequest restRequest = new RegistryRestRequest();
+                restRequest.setAddress(store.getAddress());
+                restRequest.setVersion(store.getVersion());
+                restRequest.setAppName(STORE);
+                restRequest.setId(String.valueOf(store.getId()));
+                storeMembers.add(restRequest);
+            }
+            response.setErrorType(Pdpb.ErrorType.OK);
+            HashMap<String, Serializable> result = new HashMap<>();
+            result.put("other", registryResponse);
+            result.put(PD, pdMembers);
+            result.put(STORE, storeMembers);
+            response.setData(result);
+        } catch (Exception e) {
+            log.warn(e.getMessage());
+            response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED);
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
+
+    private LinkedList<RegistryRestRequest> getMembers() throws Exception {
+        CallStreamObserverWrap<GetMembersResponse> response = new CallStreamObserverWrap<>();
+        pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response);
+        LinkedList<RegistryRestRequest> members = new LinkedList<>();
+        List<Metapb.Member> membersList = response.get().get(0).getMembersList();
+        for (Metapb.Member member : membersList) {
+            RegistryRestRequest restRequest = new RegistryRestRequest();
+            restRequest.setAddress(member.getRestUrl());
+            restRequest.setVersion(VERSION);
+            restRequest.setAppName(PD);
+            members.add(restRequest);
+        }
+        return members;
+    }
+
+    @GetMapping(value = "/license", consumes = MediaType.APPLICATION_JSON_VALUE,
+                produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RegistryRestResponse getLicenseInfo(HttpServletRequest request) {
+        RegistryRestResponse response = new RegistryRestResponse();
+        try {
+            response.setErrorType(Pdpb.ErrorType.OK);
+            // TODO: uncomment later
+            //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService();
+            //response.setData(licenseVerifierService.getContext());
+        } catch (Exception e) {
+            log.warn(e.getMessage());
+            response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED);
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
+
+    @GetMapping(value = "/license/machineInfo", consumes = MediaType.APPLICATION_JSON_VALUE,
+                produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RegistryRestResponse getLicenseMachineInfo(HttpServletRequest request) {
+        RegistryRestResponse response = new RegistryRestResponse();
+        try {
+            response.setErrorType(Pdpb.ErrorType.OK);
+            // TODO: uncomment later
+            //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService();
+            //response.setData(licenseVerifierService.getIpAndMac());
+        } catch (Exception e) {
+            log.warn(e.getMessage());
+            response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED);
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java
new file mode 100644
index 0000000..6cb5b09
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class ShardAPI extends API {
+
+    @Autowired
+    PDRestService pdRestService;
+    @Autowired
+    PDService pdService;
+
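+    /**
+     * Lists every shard of every graph partition; when no runtime partition
+     * stats exist yet, falls back to the static shard group membership.
+     */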
+    @GetMapping(value = "/shards", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getShards() {
+
+        // aggregate shard info across all graphs and partitions
+        try {
+            List<Shard> resultShardList = new ArrayList<>();
+            List<Metapb.Graph> graphs = pdRestService.getGraphs();
+            for (Metapb.Graph graph : graphs) {
+                String graphName = graph.getGraphName();
+                List<Metapb.Partition> partitions = pdRestService.getPartitions(graphName);
+                for (Metapb.Partition pt : partitions) {
+                    Metapb.PartitionStats partitionStats =
+                            pdRestService.getPartitionStats(graphName, pt.getId());
+                    if (partitionStats != null) {
+                        List<Metapb.ShardStats> shardStatsList = partitionStats.getShardStatsList();
+                        for (Metapb.ShardStats shardStats : shardStatsList) {
+                            Shard resultShard = new Shard();
+                            resultShard.storeId = shardStats.getStoreId();
+                            resultShard.partitionId = pt.getId();
+                            resultShard.role = String.valueOf(shardStats.getRole());
+                            resultShard.state = String.valueOf(shardStats.getState());
+                            resultShard.graphName = graphName;
+                            resultShard.progress = shardStats.getProgress();
+                            resultShardList.add(resultShard);
+                        }
+                    } else {
+                        List<Metapb.Shard> shardList = new ArrayList<>();
+                        var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId());
+                        if (shardGroup != null) {
+                            shardList = shardGroup.getShardsList();
+                        } else {
+                            log.error(
+                                    "ShardAPI.getShards(), get shards of group id: {} returns " +
+                                    "null.",
+                                    pt.getId());
+                        }
+
+                        for (Metapb.Shard shard : shardList) {
+                            Shard resultShard = new Shard();
+                            resultShard.storeId = shard.getStoreId();
+                            resultShard.partitionId = pt.getId();
+                            resultShard.role = String.valueOf(shard.getRole());
+                            resultShard.state = String.valueOf(Metapb.ShardState.SState_Normal);
+                            resultShard.graphName = graphName;
+                            resultShard.progress = 0;
+                            resultShardList.add(resultShard);
+                        }
+                    }
+                }
+            }
+            HashMap<String, Object> dataMap = new HashMap<>();
+            dataMap.put("shards", resultShardList);
+            return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error("PDException: ", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @Data
+    class Shard {
+
+        long storeId;
+        long partitionId;
+        String role;
+        String state;
+        String graphName;
+        int progress;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java
new file mode 100644
index 0000000..030d5de
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.model.StoreRestRequest;
+import org.apache.hugegraph.pd.model.TimeRangeRequest;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.util.DateUtil;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.DeleteMapping;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import com.google.protobuf.util.JsonFormat;
+
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class StoreAPI extends API {
+
+    @Autowired
+    PDRestService pdRestService;
+
+    @GetMapping(value = "/stores", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getStores() {
+        List<StoreStatistics> storeStatsList = new ArrayList<>();
+        try {
+            HashMap<String, Object> dataMap = new HashMap<>();
+            Map<String, Integer> stateCountMap = new HashMap<>();
+            for (Metapb.Store store : pdRestService.getStores("")) {
+                String stateKey = store.getState().name();
+                stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1);
+                storeStatsList.add(new StoreStatistics(store));
+            }
+            storeStatsList.sort((o1, o2) -> o1.address.compareTo(o2.address));
+            dataMap.put("stores", storeStatsList);
+            dataMap.put("numOfService", storeStatsList.size());
+            dataMap.put("numOfNormalService",
+                        stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0));
+            dataMap.put("stateCountMap", stateCountMap);
+            return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error("PDException", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    // Only storeState can be modified through this endpoint
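+    // A hypothetical invocation (state values follow Metapb.StoreState names,
+    // e.g. "Up"; host/port are placeholders):
+    // curl -X POST http://127.0.0.1:8620/v1/store/1 \
+    //      -H "Content-Type: application/json" -d '{"storeState": "Up"}'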
+    @PostMapping(value = "/store/{storeId}", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String setStore(@PathVariable long storeId, @RequestBody StoreRestRequest request) {
+        try {
+            Metapb.Store lastStore = pdRestService.getStore(storeId);
+            if (lastStore != null) {
+                Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore);
+                Metapb.StoreState storeState = Metapb.StoreState.valueOf(request.getStoreState());
+                builder.setState(storeState);
+                Metapb.Store newStore = pdRestService.updateStore(builder.build());
+                return toJSON(newStore, "store");
+            } else {
+                return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error"));
+            }
+        } catch (PDException e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/shardGroups", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getShardGroups() {
+        try {
+            return toJSON(pdRestService.getShardGroups(), "shardGroups");
+        } catch (PDException e) {
+            return toJSON(e);
+        }
+    }
+
+    /**
+     * Returns the leader partitions on each store
+     *
+     * @return map from store raft address to the ids of partitions it leads
+     */
+    @GetMapping(value = "/shardLeaders")
+    public Map<String, List<Integer>> shardLeaders() throws PDException {
+        Map<String, List<Integer>> leaders = new HashMap<>();
+        List<Metapb.ShardGroup> groups = pdRestService.getShardGroups();
+        groups.forEach(group -> {
+            group.getShardsList().forEach(shard -> {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    try {
+                        String ip = pdRestService.getStore(shard.getStoreId()).getRaftAddress();
+                        leaders.computeIfAbsent(ip, k -> new ArrayList<>()).add(group.getId());
+                    } catch (PDException e) {
+                        throw new RuntimeException(e);
+                    }
+                }
+            });
+        });
+        return leaders;
+    }
+
+    @GetMapping(value = "/balanceLeaders")
+    public Map<Integer, Long> balanceLeaders() throws PDException {
+        return pdRestService.balancePartitionLeader();
+    }
+
+    @DeleteMapping(value = "/store/{storeId}")
+    public String removeStore(@PathVariable(value = "storeId") Long storeId) {
+        try {
+            pdRestService.removeStore(storeId);
+        } catch (PDException e) {
+            return e.getMessage();
+        }
+        return "OK";
+    }
+
+    @PostMapping(value = "/store/log", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getStoreLog(@RequestBody TimeRangeRequest request) {
+        try {
+            Date dateStart = DateUtil.getDate(request.getStartTime());
+            Date dateEnd = DateUtil.getDate(request.getEndTime());
+            List<Metapb.LogRecord> changedStore =
+                    pdRestService.getStoreStatusLog(dateStart.getTime(),
+                                                    dateEnd.getTime());
+            if (changedStore != null) {
+                JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry
+                        .newBuilder().add(Metapb.Store.getDescriptor()).build();
+                return toJSON(changedStore, registry);
+            } else {
+                return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "no log records found"));
+            }
+        } catch (PDException e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "store/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getStore(@PathVariable long storeId) {
+        // Get the statistics of the store
+        Metapb.Store store = null;
+        try {
+            store = pdRestService.getStore(storeId);
+        } catch (PDException e) {
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+        if (store != null) {
+            StoreStatistics resultStoreStats = new StoreStatistics(store);
+            return new RestApiResponse(resultStoreStats, Pdpb.ErrorType.OK,
+                                       Pdpb.ErrorType.OK.name());
+        } else {
+            return new RestApiResponse(null, Pdpb.ErrorType.STORE_ID_NOT_EXIST,
+                                       Pdpb.ErrorType.STORE_ID_NOT_EXIST.name());
+        }
+    }
+
+    @GetMapping(value = "storesAndStats", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getStoresAndStats() {
+        //for debug use
+        try {
+            List<Metapb.Store> stores = pdRestService.getStores("");
+            return toJSON(stores, "stores");
+        } catch (PDException e) {
+            log.error("PD exception:" + e);
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "store_monitor/json/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getStoreMonitorData(@PathVariable long storeId) {
+        try {
+            List<Map<String, Long>> result = pdRestService.getMonitorData(storeId);
+            return new RestApiResponse(result, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @GetMapping(value = "store_monitor/{storeId}")
+    @ResponseBody
+    public String getStoreMonitorDataText(@PathVariable long storeId) {
+        try {
+            return pdRestService.getMonitorDataText(storeId);
+        } catch (PDException e) {
+            return "error:" + e.getErrorCode() + e.getMessage();
+        }
+    }
+
+    @Data
+    class Partition {
+
+        // Partition info
+        int partitionId;
+        String graphName;
+        String role; // shard role
+        String workState;
+        long dataSize; // occupied storage size
+
+        Partition() {
+        }
+
+        Partition(Metapb.GraphStats graphStats) {
+            partitionId = graphStats.getPartitionId();
+            graphName = graphStats.getGraphName();
+            // strip the 2-char suffix (e.g. "/g") from the graph name
+            final int postfixLength = 2;
+            graphName = graphName.substring(0, graphName.length() - postfixLength);
+            role = String.valueOf(graphStats.getRole());
+            workState = String.valueOf(graphStats.getWorkState());
+            dataSize = graphStats.getApproximateSize();
+        }
+    }
+
+    @Data
+    class StoreStatistics {
+
+        // Store statistics
+        long storeId;
+        String address;
+        String raftAddress;
+        String version;
+        String state;
+        String deployPath;
+        String dataPath; // data storage path
+        long startTimeStamp;
+        long registedTimeStamp; // the first heartbeat time is used as the registration time for now
+        long lastHeartBeat; // last heartbeat time
+        long capacity;
+        long available;
+        int partitionCount;
+        int graphSize;
+        long keyCount;
+        long leaderCount; // number of partitions whose shard role is 'Leader'
+        String serviceName;
+        String serviceVersion;
+        long serviceCreatedTimeStamp; // service creation time
+        List<Partition> partitions;
+
+        StoreStatistics(Metapb.Store store) {
+            if (store != null) {
+                storeId = store.getId();
+                address = store.getAddress();
+                raftAddress = store.getRaftAddress();
+                state = String.valueOf(store.getState());
+                version = store.getVersion();
+                deployPath = store.getDeployPath();
+                final String prefix = "file:";
+                if ((deployPath != null) && (deployPath.startsWith(prefix))) {
+                    // 去掉前缀
+                    deployPath = deployPath.substring(prefix.length());
+                }
+                if ((deployPath != null) && (deployPath.contains(".jar"))) {
+                    // 去掉jar包之后的信息
+                    deployPath = deployPath.substring(0, deployPath.indexOf(".jar") + 4);
+                }
+                dataPath = store.getDataPath();
+                startTimeStamp = store.getStartTimestamp();
+                try {
+                    serviceCreatedTimeStamp = pdRestService.getStore(store.getId())
+                                                           .getStats().getStartTime(); // instance start time
+                    final int base = 1000;
+                    serviceCreatedTimeStamp *= base; // convert seconds to milliseconds
+                } catch (PDException e) {
+                    log.error("Failed to read store stats: ", e);
+                    serviceCreatedTimeStamp = store.getStartTimestamp();
+                }
+                registedTimeStamp = store.getStartTimestamp(); // registration time
+                lastHeartBeat = store.getLastHeartbeat();
+                capacity = store.getStats().getCapacity();
+                available = store.getStats().getAvailable();
+                partitionCount = store.getStats().getPartitionCount();
+                serviceName = address + "-store";
+                serviceVersion = store.getVersion();
+                List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+                List<Partition> partitionStatsList = new ArrayList<>(); // partition info
+                HashSet<String> graphNameSet = new HashSet<>(); // for counting the number of graphs
+                HashSet<Integer> leaderPartitionIds = new HashSet<>(); // for counting leader partitions
+                // build the partition info (graphs stored on this store)
+                Map<Integer, Long> partition2KeyCount = new HashMap<>();
+                for (Metapb.GraphStats graphStats : graphStatsList) {
+                    String graphName = graphStats.getGraphName();
+                    // keep only the graph name before the /g /m /s suffix
+                    final int postfixLength = 2;
+                    graphNameSet.add(graphName.substring(0, graphName.length() - postfixLength));
+                    if ((graphStats.getGraphName() != null) &&
+                        (graphStats.getGraphName().endsWith("/g"))) {
+                        Partition pt = new Partition(graphStats);
+                        partitionStatsList.add(pt);
+                    }
+                    // record the keyCount of each partition
+                    partition2KeyCount.put(graphStats.getPartitionId(),
+                                           graphStats.getApproximateKeys());
+                    if (graphStats.getRole() == Metapb.ShardRole.Leader) {
+                        leaderPartitionIds.add(graphStats.getPartitionId());
+                    }
+                }
+                for (Map.Entry<Integer, Long> entry : partition2KeyCount.entrySet()) {
+                    keyCount += entry.getValue();
+                }
+                partitions = partitionStatsList;
+                graphSize = graphNameSet.size();
+                leaderCount = leaderPartitionIds.size();
+            }
+        }
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java
new file mode 100644
index 0000000..a187614
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1/task")
+public class TaskAPI extends API {
+
+    @Autowired
+    PDRestService pdRestService;
+
+    @GetMapping(value = "/patrolStores", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String patrolStores() {
+        try {
+            List<Metapb.Store> stores = pdRestService.patrolStores();
+            return toJSON(stores, "stores");
+        } catch (PDException e) {
+            log.error("patrolStores exception: ", e);
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/patrolPartitions", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String patrolPartitions() {
+        try {
+            List<Metapb.Partition> partitions = pdRestService.patrolPartitions();
+            return toJSON(partitions, "partitions");
+        } catch (PDException e) {
+            log.error("patrolPartitions exception: ", e);
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/balancePartitions", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public Map<Integer, KVPair<Long, Long>> balancePartitions() {
+        try {
+            return pdRestService.balancePartitions();
+        } catch (PDException e) {
+            log.error("balancePartitions exception: ", e);
+            return null;
+        }
+    }
+
+    @GetMapping(value = "/splitPartitions", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String splitPartitions() {
+        try {
+            List<Metapb.Partition> partitions = pdRestService.splitPartitions();
+            return toJSON(partitions, "partitions");
+        } catch (PDException e) {
+            log.error("splitPartitions exception: ", e);
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/balanceLeaders")
+    public Map<Integer, Long> balanceLeaders() throws PDException {
+        return pdRestService.balancePartitionLeader();
+    }
+
+    @GetMapping(value = "/compact")
+    public String dbCompaction() throws PDException {
+        pdRestService.dbCompaction();
+        return "compact ok";
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java
new file mode 100644
index 0000000..e4ee1c1
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.List;
+
+import org.apache.hugegraph.pd.RegistryService;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.grpc.pulse.ChangeShard;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse;
+import org.apache.hugegraph.pd.meta.MetadataFactory;
+import org.apache.hugegraph.pd.meta.QueueStore;
+import org.apache.hugegraph.pd.pulse.PDPulseSubject;
+import org.apache.hugegraph.pd.watch.PDWatchSubject;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.PutMapping;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.Parser;
+
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/test")
+public class TestAPI {
+
+    @Autowired
+    private PDConfig pdConfig;
+
+    @GetMapping(value = "/discovery/{appName}", produces = MediaType.TEXT_PLAIN_VALUE)
+    @ResponseBody
+    public String discovery(@PathVariable(value = "appName", required = true) String appName) {
+        RegistryService register = new RegistryService(pdConfig);
+        Query query = Query.newBuilder().build();
+        return register.getNodes(query).toString();
+    }
+
+    @GetMapping(value = "/pulse", produces = MediaType.TEXT_PLAIN_VALUE)
+    @ResponseBody
+    public String notifyClient() {
+        PDPulseSubject.notifyClient(
+                PartitionHeartbeatResponse.newBuilder()
+                                          .setPartition(Metapb.Partition.newBuilder()
+                                                                        .setId(8)
+                                                                        .setGraphName("graphName8"))
+                                          .setChangeShard(
+                                                  ChangeShard.newBuilder()
+                                                             .setChangeTypeValue(8)
+                                                             .addShard(Metapb.Shard.newBuilder()
+                                                                                   .setRoleValue(8)
+                                                                                   .setStoreId(8)
+                                                             )
+                                          )
+
+        );
+        return "partition";
+    }
+
+    @GetMapping(value = "/partition", produces = MediaType.TEXT_PLAIN_VALUE)
+    @ResponseBody
+    public String noticePartition() {
+        PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER, "graph-test", 99);
+        return "partition";
+    }
+
+    @PutMapping(value = "/queue", produces = MediaType.TEXT_PLAIN_VALUE)
+    @ResponseBody
+    public String testPutQueue() {
+        this.putQueue();
+        return "queue";
+    }
+
+    public void putQueue() {
+        Metapb.Partition partition = Metapb.Partition.newBuilder()
+                                                     .setId(9)
+                                                     .setGraphName("graphName")
+                                                     .build();
+        Metapb.Shard shard = Metapb.Shard.newBuilder()
+                                         .setRoleValue(9)
+                                         .setStoreId(9)
+                                         .build();
+        ChangeShard changeShard = ChangeShard.newBuilder()
+                                             .setChangeTypeValue(9)
+                                             .addShard(shard)
+                                             .build();
+        PartitionHeartbeatResponse response = PartitionHeartbeatResponse.newBuilder()
+                                                                        .setPartition(partition)
+                                                                        .setChangeShard(changeShard)
+                                                                        .build();
+
+        Metapb.QueueItem.Builder builder = Metapb.QueueItem.newBuilder()
+                                                           .setItemId("item-id")
+                                                           .setItemClass("item-class")
+                                                           .setItemContent(response.toByteString());
+
+        QueueStore store = MetadataFactory.newQueueStore(pdConfig);
+
+        try {
+            store.addItem(builder.setItemId("item-id-1").build());
+            store.addItem(builder.setItemId("item-id-2").build());
+            store.addItem(builder.setItemId("item-id-3").build());
+        } catch (PDException e) {
+            log.error("Failed to add queue items: ", e);
+        }
+        List<Metapb.QueueItem> queue;
+        try {
+            queue = store.getQueue();
+        } catch (PDException e) {
+            log.error("Failed to read the queue: ", e);
+            return;
+        }
+        Parser<PartitionHeartbeatResponse> parser = PartitionHeartbeatResponse.parser();
+
+        queue.forEach(e -> {
+            try {
+                PartitionHeartbeatResponse buf = parser.parseFrom(e.getItemContent());
+                PDPulseSubject.notifyClient(PartitionHeartbeatResponse.newBuilder(buf));
+            } catch (InvalidProtocolBufferException ex) {
+                log.error("Failed to parse queue item: ", ex);
+            }
+        });
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java
new file mode 100644
index 0000000..08a4e8a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+import javax.annotation.PostConstruct;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hugegraph.pd.RegistryService;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PDRuntimeException;
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.raft.RaftStateListener;
+import org.lognet.springboot.grpc.GRpcService;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import io.grpc.ManagedChannel;
+import lombok.extern.slf4j.Slf4j;
+
+@Useless("discovery related")
+@Slf4j
+@GRpcService
+public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplBase implements
+                                                                                    ServiceGrpc,
+                                                                                    RaftStateListener {
+
+    static final AtomicLong id = new AtomicLong();
+    private static final String CORES = "cores";
+    RegistryService register = null;
+    //LicenseVerifierService licenseVerifierService;
+    @Autowired
+    private PDConfig pdConfig;
+    private ManagedChannel channel;
+
+    @PostConstruct
+    public void init() throws PDException {
+        log.info("PDService init………… {}", pdConfig);
+        RaftEngine.getInstance().init(pdConfig.getRaft());
+        RaftEngine.getInstance().addStateListener(this);
+        register = new RegistryService(pdConfig);
+        //licenseVerifierService = new LicenseVerifierService(pdConfig);
+    }
+
+    private Pdpb.ResponseHeader newErrorHeader(PDException e) {
+        Pdpb.Error error = Pdpb.Error.newBuilder()
+                                     .setTypeValue(e.getErrorCode())
+                                     .setMessage(e.getMessage())
+                                     .build();
+        return Pdpb.ResponseHeader.newBuilder().setError(error).build();
+    }
+
+    @Override
+    public void register(NodeInfo request, io.grpc.stub.StreamObserver<RegisterInfo> observer) {
+        if (!isLeader()) {
+            redirectToLeader(null, DiscoveryServiceGrpc.getRegisterMethod(), request, observer);
+            return;
+        }
+        int outTimes = pdConfig.getDiscovery().getHeartbeatOutTimes();
+        RegisterInfo registerInfo;
+        try {
+            if (request.getAppName().equals("hg")) {
+                Query queryRequest = Query.newBuilder().setAppName(request.getAppName())
+                                          .setVersion(request.getVersion()).build();
+                NodeInfos nodes = register.getNodes(queryRequest);
+                String address = request.getAddress();
+                int nodeCount = nodes.getInfoCount() + 1;
+                for (NodeInfo node : nodes.getInfoList()) {
+                    if (node.getAddress().equals(address)) {
+                        nodeCount = nodes.getInfoCount();
+                        break;
+                    }
+                }
+                Map<String, String> labelsMap = request.getLabelsMap();
+                String coreCount = labelsMap.get(CORES);
+                if (StringUtils.isEmpty(coreCount)) {
+                    throw new PDException(-1, "core count can not be null");
+                }
+                int core = Integer.parseInt(coreCount);
+                //licenseVerifierService.verify(core, nodeCount);
+            }
+            register.register(request, outTimes);
+            String valueId = request.getId();
+            String nodeId = "0".equals(valueId) ? String.valueOf(id.incrementAndGet()) : valueId;
+            registerInfo = RegisterInfo.newBuilder()
+                                       .setNodeInfo(NodeInfo.newBuilder().setId(nodeId).build())
+                                       .build();
+
+        } catch (PDException e) {
+            registerInfo = RegisterInfo.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.debug("registerStore exception: ", e);
+        } catch (PDRuntimeException ex) {
+            Pdpb.Error error = Pdpb.Error.newBuilder().setTypeValue(ex.getErrorCode())
+                                         .setMessage(ex.getMessage()).build();
+            Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build();
+            registerInfo = RegisterInfo.newBuilder().setHeader(header).build();
+            log.debug("registerStore exception: ", ex);
+        } catch (Exception e) {
+            Pdpb.Error error =
+                    Pdpb.Error.newBuilder().setTypeValue(Pdpb.ErrorType.UNKNOWN.getNumber())
+                              .setMessage(e.getMessage()).build();
+            Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build();
+            registerInfo = RegisterInfo.newBuilder().setHeader(header).build();
+        }
+        observer.onNext(registerInfo);
+        observer.onCompleted();
+    }
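+
+    // A hedged sketch of how a service node might invoke this RPC through the generated
+    // blocking stub; the channel target and label values are assumptions for illustration:
+    //
+    //   ManagedChannel ch = ManagedChannelBuilder.forTarget("127.0.0.1:8686")  // hypothetical PD
+    //                                            .usePlaintext().build();
+    //   NodeInfo node = NodeInfo.newBuilder()
+    //                           .setAppName("hg")
+    //                           .setVersion("1.0")
+    //                           .setAddress("127.0.0.1:8500")
+    //                           .putLabels("cores", "8")  // required when appName is "hg"
+    //                           .build();
+    //   RegisterInfo info = DiscoveryServiceGrpc.newBlockingStub(ch).register(node);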
+
+    @Override
+    public void getNodes(Query request, io.grpc.stub.StreamObserver<NodeInfos> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(null, DiscoveryServiceGrpc.getGetNodesMethod(), request,
+                             responseObserver);
+            return;
+        }
+        responseObserver.onNext(register.getNodes(request));
+        responseObserver.onCompleted();
+    }
+
+    @Override
+    public boolean isLeader() {
+        return RaftEngine.getInstance().isLeader();
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java
new file mode 100644
index 0000000..ffa8cda
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java
@@ -0,0 +1,590 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import javax.annotation.PostConstruct;
+
+import org.apache.hugegraph.pd.KvService;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.kv.K;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.Kv;
+import org.apache.hugegraph.pd.grpc.kv.KvResponse;
+import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc;
+import org.apache.hugegraph.pd.grpc.kv.LockRequest;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.TTLRequest;
+import org.apache.hugegraph.pd.grpc.kv.TTLResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchKv;
+import org.apache.hugegraph.pd.grpc.kv.WatchRequest;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchState;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.raft.RaftStateListener;
+import org.apache.hugegraph.pd.watch.KvWatchSubject;
+import org.lognet.springboot.grpc.GRpcService;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import io.grpc.ManagedChannel;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Core implementation of the KV store service
+ */
+@Slf4j
+@GRpcService
+public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements RaftStateListener,
+                                                                                  ServiceGrpc {
+
+    private final ManagedChannel channel = null;
+    KvService kvService;
+    AtomicLong count = new AtomicLong();
+    String msg = "node is not leader,it is necessary to  redirect to the leader on the client";
+    @Autowired
+    private PDConfig pdConfig;
+    private KvWatchSubject subjects;
+    private ScheduledExecutorService executor;
+
+    @PostConstruct
+    public void init() {
+        RaftEngine.getInstance().init(pdConfig.getRaft());
+        RaftEngine.getInstance().addStateListener(this);
+        kvService = new KvService(pdConfig);
+        subjects = new KvWatchSubject(pdConfig);
+        executor = Executors.newScheduledThreadPool(1);
+        executor.scheduleWithFixedDelay(() -> {
+            if (isLeader()) {
+                subjects.keepClientAlive();
+            }
+        }, 0, KvWatchSubject.WATCH_TTL / 2, TimeUnit.MILLISECONDS);
+    }
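+
+    // Every RPC below follows the same guard pattern: a request that reaches a follower is
+    // forwarded to the raft leader via redirectToLeader(...), and the leader check is repeated
+    // inside the catch block because leadership may change while the request is being served.
+    // A simplified sketch of the shape shared by the unary methods in this class:
+    //
+    //   if (!isLeader()) {
+    //       redirectToLeader(channel, method, request, responseObserver);
+    //       return;
+    //   }
+    //   try {
+    //       // serve the request locally
+    //   } catch (PDException e) {
+    //       if (!isLeader()) {  // lost leadership while serving, redirect instead of failing
+    //           redirectToLeader(channel, method, request, responseObserver);
+    //           return;
+    //       }
+    //       // otherwise report the error header to the caller
+    //   }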
+
+    /**
+     * Plain put
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void put(Kv request, StreamObserver<KvResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver);
+            return;
+        }
+        KvResponse response;
+        KvResponse.Builder builder = KvResponse.newBuilder();
+        try {
+            String key = request.getKey();
+            String value = request.getValue();
+            this.kvService.put(key, value);
+            WatchKv watchKV = getWatchKv(key, value);
+            subjects.notifyAllObserver(key, WatchType.Put, new WatchKv[]{watchKV});
+            response = builder.setHeader(getResponseHeader()).build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Plain get
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void get(K request, StreamObserver<KResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getGetMethod(), request, responseObserver);
+            return;
+        }
+        KResponse response;
+        KResponse.Builder builder = KResponse.newBuilder();
+        try {
+            String value = this.kvService.get(request.getKey());
+            builder.setHeader(getResponseHeader());
+            if (value != null) {
+                builder.setValue(value);
+            }
+            response = builder.build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getGetMethod(), request, responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Plain delete
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void delete(K request, StreamObserver<KvResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, responseObserver);
+            return;
+        }
+        KvResponse response;
+        KvResponse.Builder builder = KvResponse.newBuilder();
+        try {
+            String key = request.getKey();
+            Kv deleted = this.kvService.delete(key);
+            if (deleted.getValue() != null) {
+                WatchKv watchKV = getWatchKv(deleted.getKey(), deleted.getValue());
+                subjects.notifyAllObserver(key, WatchType.Delete, new WatchKv[]{watchKV});
+            }
+            response = builder.setHeader(getResponseHeader()).build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Delete by prefix
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void deletePrefix(K request, StreamObserver<KvResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request,
+                             responseObserver);
+            return;
+        }
+        KvResponse response;
+        KvResponse.Builder builder = KvResponse.newBuilder();
+        try {
+            String key = request.getKey();
+            List<Kv> kvs = this.kvService.deleteWithPrefix(key);
+            WatchKv[] watchKvs = new WatchKv[kvs.size()];
+            int i = 0;
+            for (Kv kv : kvs) {
+                WatchKv watchKV = getWatchKv(kv.getKey(), kv.getValue());
+                watchKvs[i++] = watchKV;
+            }
+            subjects.notifyAllObserver(key, WatchType.Delete, watchKvs);
+            response = builder.setHeader(getResponseHeader()).build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Scan by prefix
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void scanPrefix(K request, StreamObserver<ScanPrefixResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request,
+                             responseObserver);
+            return;
+        }
+        ScanPrefixResponse response;
+        ScanPrefixResponse.Builder builder = ScanPrefixResponse.newBuilder();
+        try {
+            Map kvs = this.kvService.scanWithPrefix(request.getKey());
+            response = builder.setHeader(getResponseHeader()).putAllKvs(kvs).build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Generate a random non-zero long to use as an id
+     *
+     * @return a non-zero random long
+     */
+    private long getRandomLong() {
+        long result;
+        Random random = new Random();
+        while ((result = random.nextLong()) == 0) {
+            // retry until a non-zero value is drawn
+        }
+        return result;
+    }
+
+    /**
+     * Plain watch
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void watch(WatchRequest request, StreamObserver<WatchResponse> responseObserver) {
+        if (!isLeader()) {
+            responseObserver.onError(new PDException(-1, msg));
+            return;
+        }
+        try {
+            clientWatch(request, responseObserver, false);
+        } catch (PDException e) {
+            if (!isLeader()) {
+                try {
+                    responseObserver.onError(new PDException(-1, msg));
+                } catch (IllegalStateException ie) {
+                    // the stream has already been closed, nothing more to do
+                } catch (Exception e1) {
+                    log.error("redirect with error: ", e1);
+                }
+            }
+        }
+    }
+
+    /**
+     * Plain watch by prefix
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void watchPrefix(WatchRequest request, StreamObserver<WatchResponse> responseObserver) {
+        if (!isLeader()) {
+            responseObserver.onError(new PDException(-1, msg));
+            return;
+        }
+        try {
+            clientWatch(request, responseObserver, true);
+        } catch (PDException e) {
+            if (!isLeader()) {
+                try {
+                    responseObserver.onError(new PDException(-1, msg));
+                } catch (IllegalStateException ie) {
+                    // the stream has already been closed, nothing more to do
+                } catch (Exception e1) {
+                    log.error("redirect with error: ", e1);
+                }
+            }
+        }
+    }
+
+    /**
+     * Common logic shared by the two watch methods above
+     *
+     * @param request
+     * @param responseObserver
+     * @param isPrefix
+     * @throws PDException
+     */
+    private void clientWatch(WatchRequest request, StreamObserver<WatchResponse> responseObserver,
+                             boolean isPrefix) throws PDException {
+        try {
+            String key = request.getKey();
+            long clientId = request.getClientId();
+            WatchResponse.Builder builder = WatchResponse.newBuilder();
+            WatchResponse response;
+            if (request.getState().equals(WatchState.Starting) && clientId == 0) {
+                clientId = getRandomLong();
+                response = builder.setClientId(clientId).setState(WatchState.Starting).build();
+            } else {
+                response = builder.setState(WatchState.Started).build();
+            }
+            String delimiter =
+                    isPrefix ? KvWatchSubject.PREFIX_DELIMITER : KvWatchSubject.KEY_DELIMITER;
+            subjects.addObserver(key, clientId, responseObserver, delimiter);
+            synchronized (responseObserver) {
+                responseObserver.onNext(response);
+            }
+        } catch (PDException e) {
+            if (!isLeader()) {
+                throw new PDException(-1, msg);
+            }
+            throw new PDException(e.getErrorCode(), e);
+        }
+    }
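+
+    // For context: on its first watch a client sends WatchState.Starting with clientId 0 and
+    // the server assigns a random non-zero clientId, which the client should reuse when it
+    // reconnects so its observers can be kept alive. A hedged request-building sketch:
+    //
+    //   WatchRequest first = WatchRequest.newBuilder()
+    //                                    .setKey("config/")             // assumed sample key
+    //                                    .setState(WatchState.Starting)
+    //                                    .setClientId(0)                // 0 => server assigns one
+    //                                    .build();
+    //   // the first WatchResponse then carries the assigned clientId for later reconnects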
+
+    /**
+     * Acquire a lock
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void lock(LockRequest request, StreamObserver<LockResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver);
+            return;
+        }
+        LockResponse response;
+        LockResponse.Builder builder = LockResponse.newBuilder();
+        try {
+            long clientId = request.getClientId();
+            if (clientId == 0) {
+                clientId = getRandomLong();
+            }
+            boolean locked = this.kvService.lock(request.getKey(), request.getTtl(), clientId);
+            response =
+                    builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId)
+                           .build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver);
+                return;
+            }
+            log.error("lock with error :", e);
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
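+
+    // A hedged usage sketch of the lock/unlock round trip through the generated blocking stub;
+    // the TTL unit (assumed milliseconds here) and key name are illustrative only:
+    //
+    //   KvServiceGrpc.KvServiceBlockingStub stub = KvServiceGrpc.newBlockingStub(channel);
+    //   LockResponse lock = stub.lock(LockRequest.newBuilder()
+    //                                            .setKey("task-lock")
+    //                                            .setTtl(10_000)
+    //                                            .setClientId(0)  // 0 => server assigns an id
+    //                                            .build());
+    //   if (lock.getSucceed()) {
+    //       // ... do work, optionally renewing via keepAlive with lock.getClientId() ...
+    //       stub.unlock(LockRequest.newBuilder()
+    //                              .setKey("task-lock")
+    //                              .setClientId(lock.getClientId())
+    //                              .build());
+    //   }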
+
+    @Override
+    public void lockWithoutReentrant(LockRequest request,
+                                     StreamObserver<LockResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request,
+                             responseObserver);
+            return;
+        }
+        LockResponse response;
+        LockResponse.Builder builder = LockResponse.newBuilder();
+        try {
+            long clientId = request.getClientId();
+            if (clientId == 0) {
+                clientId = getRandomLong();
+            }
+            boolean locked = this.kvService.lockWithoutReentrant(request.getKey(), request.getTtl(),
+                                                                 clientId);
+            response =
+                    builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId)
+                           .build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            log.error("lock with error :", e);
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    @Override
+    public void isLocked(LockRequest request, StreamObserver<LockResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, responseObserver);
+            return;
+        }
+        LockResponse response;
+        LockResponse.Builder builder = LockResponse.newBuilder();
+        try {
+            boolean locked = this.kvService.locked(request.getKey());
+            response = builder.setHeader(getResponseHeader()).setSucceed(locked).build();
+        } catch (PDException e) {
+            log.error("lock with error :", e);
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Release a lock
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void unlock(LockRequest request, StreamObserver<LockResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, responseObserver);
+            return;
+        }
+        LockResponse response;
+        LockResponse.Builder builder = LockResponse.newBuilder();
+        try {
+            long clientId = request.getClientId();
+            if (clientId == 0) {
+                throw new PDException(-1, "incorrect clientId: 0");
+            }
+            boolean unlocked = this.kvService.unlock(request.getKey(), clientId);
+            response = builder.setHeader(getResponseHeader()).setSucceed(unlocked)
+                              .setClientId(clientId).build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Keep a lock alive (renew its lease)
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void keepAlive(LockRequest request, StreamObserver<LockResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request,
+                             responseObserver);
+            return;
+        }
+        LockResponse response;
+        LockResponse.Builder builder = LockResponse.newBuilder();
+        try {
+            long clientId = request.getClientId();
+            if (clientId == 0) {
+                throw new PDException(-1, "incorrect clientId: 0");
+            }
+            boolean alive = this.kvService.keepAlive(request.getKey(), clientId);
+            response =
+                    builder.setHeader(getResponseHeader()).setSucceed(alive).setClientId(clientId)
+                           .build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Put with a TTL (expiration time)
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void putTTL(TTLRequest request, StreamObserver<TTLResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, responseObserver);
+            return;
+        }
+        TTLResponse response;
+        TTLResponse.Builder builder = TTLResponse.newBuilder();
+        try {
+            this.kvService.put(request.getKey(), request.getValue(), request.getTtl());
+            response = builder.setHeader(getResponseHeader()).setSucceed(true).build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
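+
+    // For illustration, a key written via putTTL expires unless the client periodically calls
+    // keepTTLAlive before the TTL elapses; a minimal sketch (TTL unit assumed milliseconds):
+    //
+    //   KvServiceGrpc.KvServiceBlockingStub stub = KvServiceGrpc.newBlockingStub(channel);
+    //   stub.putTTL(TTLRequest.newBuilder().setKey("lease").setValue("v").setTtl(5_000).build());
+    //   // ... some time later, but before the TTL elapses ...
+    //   stub.keepTTLAlive(TTLRequest.newBuilder().setKey("lease").build());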
+
+    /**
+     * Keep a key with a TTL alive
+     *
+     * @param request
+     * @param responseObserver
+     */
+    @Override
+    public void keepTTLAlive(TTLRequest request, StreamObserver<TTLResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request,
+                             responseObserver);
+            return;
+        }
+        TTLResponse response;
+        TTLResponse.Builder builder = TTLResponse.newBuilder();
+        try {
+            this.kvService.keepAlive(request.getKey());
+            response = builder.setHeader(getResponseHeader()).setSucceed(true).build();
+        } catch (PDException e) {
+            if (!isLeader()) {
+                redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request,
+                                 responseObserver);
+                return;
+            }
+            response = builder.setHeader(getResponseHeader(e)).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    private WatchKv getWatchKv(String key, String value) {
+        return WatchKv.newBuilder().setKey(key).setValue(value).build();
+    }
+
+    @Override
+    public void onRaftLeaderChanged() {
+        subjects.notifyClientChangeLeader();
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java
new file mode 100644
index 0000000..04db6ae
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc;
+import org.apache.hugegraph.pd.grpc.pulse.PulseRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.meta.MetadataFactory;
+import org.apache.hugegraph.pd.meta.QueueStore;
+import org.apache.hugegraph.pd.pulse.PDPulseSubject;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.lognet.springboot.grpc.GRpcService;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@GRpcService
+public class PDPulseService extends HgPdPulseGrpc.HgPdPulseImplBase {
+
+    private static final Supplier<List<Metapb.QueueItem>> QUEUE_RETRIEVE_FUNCTION =
+            () -> Collections.emptyList();
+    private static final Function<Metapb.QueueItem, Boolean> QUEUE_ITEM_BOOLEAN_FUNCTION =
+            (e) -> true;
+    private static final Function<String, Boolean> QUEUE_REMOVE_FUNCTION = (e) -> true;
+    @Autowired
+    private PDConfig pdConfig;
+    private QueueStore queueStore = null;
+
+    public PDPulseService() {
+        PDPulseSubject.setQueueRetrieveFunction(() -> getQueue());
+        PDPulseSubject.setQueueDurableFunction(getQueueDurableFunction());
+        PDPulseSubject.setQueueRemoveFunction(getQueueRemoveFunction());
+    }
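+
+    // The three hooks wired above let PDPulseSubject buffer pulse messages durably: responses
+    // appear to be persisted through the durable function, replayed after a restart through the
+    // retrieve function, and removed through the remove function once they are consumed.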
+
+    @Override
+    public StreamObserver<PulseRequest> pulse(StreamObserver<PulseResponse> responseObserver) {
+        return PDPulseSubject.addObserver(responseObserver);
+    }
+
+    private Function<String, Boolean> getQueueRemoveFunction() {
+        return itemId -> {
+            try {
+                this.getQueueStore().removeItem(itemId);
+                return true;
+            } catch (Throwable t) {
+                log.error("Failed to remove item from store, item-id: " + itemId + ", cause by:",
+                          t);
+            }
+            return false;
+        };
+    }
+
+    private Function<Metapb.QueueItem, Boolean> getQueueDurableFunction() {
+        return item -> {
+            try {
+                this.getQueueStore().addItem(item);
+                return true;
+            } catch (Throwable t) {
+                log.error("Failed to add item to store, item: " + item.toString() + ", cause by:",
+                          t);
+            }
+            return false;
+        };
+    }
+
+    private boolean isLeader() {
+        return RaftEngine.getInstance().isLeader();
+    }
+
+    private List<Metapb.QueueItem> getQueue() {
+
+        if (!isLeader()) {
+            return Collections.emptyList();
+        }
+
+        try {
+            return this.getQueueStore().getQueue();
+        } catch (Throwable t) {
+            log.error("Failed to retrieve queue from QueueStore, cause by:", t);
+        }
+
+        log.warn("Returned empty queue list.");
+        return Collections.emptyList();
+    }
+
+    private QueueStore getQueueStore() {
+        if (this.queueStore == null) {
+            this.queueStore = MetadataFactory.newQueueStore(pdConfig);
+        }
+        return this.queueStore;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java
new file mode 100644
index 0000000..ed90220
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.hugegraph.pd.ConfigService;
+import org.apache.hugegraph.pd.LogService;
+import org.apache.hugegraph.pd.PartitionService;
+import org.apache.hugegraph.pd.StoreMonitorDataService;
+import org.apache.hugegraph.pd.StoreNodeService;
+import org.apache.hugegraph.pd.TaskScheduleService;
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo;
+import org.apache.hugegraph.pd.model.RegistryRestRequest;
+import org.apache.hugegraph.pd.model.RegistryRestResponse;
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@Service
+public class PDRestService implements InitializingBean {
+
+    private static final String EMPTY_STRING = "";
+    @Autowired
+    PDService pdService;
+    @Autowired
+    DiscoveryService discoveryService;
+    private StoreNodeService storeNodeService;
+    private PartitionService partitionService;
+    private TaskScheduleService monitorService;
+    private ConfigService configService;
+    private LogService logService;
+    private StoreMonitorDataService storeMonitorDataService;
+
+    /**
+     * Initialize the underlying PD services once the bean properties are set
+     *
+     * @throws Exception if any required service fails to initialize
+     */
+    @Override
+    public void afterPropertiesSet() throws Exception {
+        storeNodeService = pdService.getStoreNodeService();
+        partitionService = pdService.getPartitionService();
+        monitorService = pdService.getTaskService();
+        configService = pdService.getConfigService();
+        logService = pdService.getLogService();
+        storeMonitorDataService = pdService.getStoreMonitorDataService();
+        HgAssert.isNotNull(storeNodeService, "storeNodeService is not initialized");
+        HgAssert.isNotNull(partitionService, "partitionService is not initialized");
+    }
+
+    public List<Metapb.Store> getStores(String graphName) throws PDException {
+        return storeNodeService.getStores(graphName);
+    }
+
+    public Metapb.Store getStore(long storeId) throws PDException {
+        return storeNodeService.getStore(storeId);
+    }
+
+    public List<Metapb.ShardGroup> getShardGroups() throws PDException {
+        return storeNodeService.getShardGroups();
+    }
+
+    public Metapb.Store updateStore(Metapb.Store store) throws PDException {
+        logService.insertLog(LogService.NODE_CHANGE, LogService.REST, store);
+        return storeNodeService.updateStore(store);
+    }
+
+    public boolean removeStore(Long storeId) throws PDException {
+        if (storeId == null) {
+            return false;
+        }
+        return 0 != storeNodeService.removeStore(storeId);
+    }
+
+    public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException {
+        return configService.setGraphSpace(graphSpace);
+    }
+
+    public List<Metapb.GraphSpace> getGraphSpaces() throws PDException {
+        return configService.getGraphSpace(EMPTY_STRING);
+    }
+
+    public Metapb.GraphSpace getGraphSpace(String graphSpaceName) throws PDException {
+        return configService.getGraphSpace(graphSpaceName).get(0);
+    }
+
+    public List<Metapb.Graph> getGraphs() throws PDException {
+        return partitionService.getGraphs();
+    }
+
+    public Metapb.Graph getGraph(String graphName) throws PDException {
+        return partitionService.getGraph(graphName);
+    }
+
+    public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException {
+        return partitionService.updateGraph(graph);
+    }
+
+    public List<Metapb.Partition> getPartitions(String graphName) {
+        return partitionService.getPartitions(graphName);
+    }
+
+    public List<Metapb.Store> patrolStores() throws PDException {
+        return monitorService.patrolStores();
+    }
+
+    public List<Metapb.Partition> patrolPartitions() throws PDException {
+        return monitorService.patrolPartitions();
+    }
+
+    public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws
+                                                                                      PDException {
+        return partitionService.getPartitionStats(graphName, partitionId);
+    }
+
+    public List<Metapb.PartitionStats> getPartitionStatus(String graphName) throws PDException {
+        return partitionService.getPartitionStatus(graphName);
+    }
+
+    public Map<Integer, KVPair<Long, Long>> balancePartitions() throws PDException {
+        return monitorService.balancePartitionShard();
+    }
+
+    public List<Metapb.Partition> splitPartitions() throws PDException {
+        return monitorService.autoSplitPartition();
+    }
+
+    public List<Metapb.Store> getStoreStats(boolean isActive) throws PDException {
+        return storeNodeService.getStoreStatus(isActive);
+    }
+
+    public List<Map<String, Long>> getMonitorData(long storeId) throws PDException {
+        return storeMonitorDataService.getStoreMonitorData(storeId);
+    }
+
+    public String getMonitorDataText(long storeId) throws PDException {
+        return storeMonitorDataService.getStoreMonitorDataText(storeId);
+    }
+
+    public RegistryRestResponse register(NodeInfo nodeInfo) throws PDException {
+        CountDownLatch latch = new CountDownLatch(1);
+        final RegisterInfo[] info = {null};
+        RegistryRestResponse response = new RegistryRestResponse();
+        try {
+            StreamObserver<RegisterInfo> observer = new StreamObserver<RegisterInfo>() {
+                @Override
+                public void onNext(RegisterInfo value) {
+                    info[0] = value;
+                    latch.countDown();
+                }
+
+                @Override
+                public void onError(Throwable t) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onCompleted() {
+                    latch.countDown();
+                }
+            };
+            this.discoveryService.register(nodeInfo, observer);
+            latch.await();
+            if (info[0] != null) {
+                Pdpb.Error error = info[0].getHeader().getError();
+                response.setErrorType(error.getType());
+                response.setMessage(error.getMessage());
+            } else {
+                // onError/onCompleted without a value: avoid an NPE on info[0]
+                response.setErrorType(Pdpb.ErrorType.UNKNOWN);
+                response.setMessage("register failed: no response received");
+            }
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED);
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
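+    // The method above bridges the async gRPC callback into a synchronous REST call via
+    // a CountDownLatch. A minimal caller-side sketch (illustrative only, names assumed):
+    //
+    //   RegistryRestResponse resp = pdRestService.register(nodeInfo);
+    //   if (resp.getErrorType() == Pdpb.ErrorType.OK) {
+    //       // the node was registered successfully
+    //   }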
+
+    public ArrayList<RegistryRestRequest> getNodeInfo(Query request) throws PDException {
+        CountDownLatch latch = new CountDownLatch(1);
+        final NodeInfos[] info = {null};
+        ArrayList<RegistryRestRequest> registryRestRequests = new ArrayList<>();
+        try {
+            StreamObserver<NodeInfos> observer = new StreamObserver<NodeInfos>() {
+                @Override
+                public void onNext(NodeInfos value) {
+                    info[0] = value;
+                    latch.countDown();
+                }
+
+                @Override
+                public void onError(Throwable t) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onCompleted() {
+                    latch.countDown();
+                }
+            };
+            this.discoveryService.getNodes(request, observer);
+            latch.await();
+            List<NodeInfo> infoList = info[0].getInfoList();
+            registryRestRequests = new ArrayList(infoList.size());
+            for (int i = 0; i < infoList.size(); i++) {
+                NodeInfo element = infoList.get(i);
+                RegistryRestRequest registryRestRequest = new RegistryRestRequest();
+                registryRestRequest.setAddress(element.getAddress());
+                registryRestRequest.setAppName(element.getAppName());
+                registryRestRequest.setVersion(element.getVersion());
+                registryRestRequest.setInterval(String.valueOf(element.getInterval()));
+                HashMap<String, String> labels = new HashMap<>();
+                labels.putAll(element.getLabelsMap());
+                registryRestRequest.setLabels(labels);
+                registryRestRequests.add(registryRestRequest);
+            }
+        } catch (InterruptedException e) {
+            response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED);
+            response.setMessage(e.getMessage());
+        }
+        return registryRestRequests;
+    }
+
+    public List<Metapb.LogRecord> getStoreStatusLog(Long start, Long end) throws PDException {
+        return logService.getLog(LogService.NODE_CHANGE, start, end);
+    }
+
+    public List<Metapb.LogRecord> getPartitionLog(Long start, Long end) throws PDException {
+        return logService.getLog(LogService.PARTITION_CHANGE, start, end);
+    }
+
+    public Map<Integer, Long> balancePartitionLeader() throws PDException {
+        return monitorService.balancePartitionLeader(true);
+    }
+
+    public void dbCompaction() throws PDException {
+        monitorService.dbCompaction("");
+    }
+
+    public List<Metapb.Shard> getShardList(int partitionId) throws PDException {
+        return storeNodeService.getShardList(partitionId);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java
new file mode 100644
index 0000000..c9729bb
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java
@@ -0,0 +1,1794 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+import javax.annotation.PostConstruct;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.ConfigService;
+import org.apache.hugegraph.pd.IdService;
+import org.apache.hugegraph.pd.LogService;
+import org.apache.hugegraph.pd.PartitionInstructionListener;
+import org.apache.hugegraph.pd.PartitionService;
+import org.apache.hugegraph.pd.PartitionStatusListener;
+import org.apache.hugegraph.pd.ShardGroupStatusListener;
+import org.apache.hugegraph.pd.StoreMonitorDataService;
+import org.apache.hugegraph.pd.StoreNodeService;
+import org.apache.hugegraph.pd.StoreStatusListener;
+import org.apache.hugegraph.pd.TaskScheduleService;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.ChangeShard;
+import org.apache.hugegraph.pd.grpc.pulse.CleanPartition;
+import org.apache.hugegraph.pd.grpc.pulse.DbCompaction;
+import org.apache.hugegraph.pd.grpc.pulse.MovePartition;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange;
+import org.apache.hugegraph.pd.grpc.pulse.SplitPartition;
+import org.apache.hugegraph.pd.grpc.pulse.TransferLeader;
+import org.apache.hugegraph.pd.grpc.watch.NodeEventType;
+import org.apache.hugegraph.pd.grpc.watch.WatchGraphResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+import org.apache.hugegraph.pd.pulse.PDPulseSubject;
+import org.apache.hugegraph.pd.pulse.PulseListener;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.raft.RaftStateListener;
+import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil;
+import org.apache.hugegraph.pd.watch.PDWatchSubject;
+import org.lognet.springboot.grpc.GRpcService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.util.CollectionUtils;
+
+import com.alipay.sofa.jraft.JRaftUtils;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.conf.Configuration;
+import com.alipay.sofa.jraft.entity.PeerId;
+
+import io.grpc.ManagedChannel;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+// TODO: restore the license verifier service later (removed for now)
+@Slf4j
+@GRpcService
+public class PDService extends PDGrpc.PDImplBase implements ServiceGrpc, RaftStateListener {
+
+    static String TASK_ID_KEY = "task_id";
+    private final Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError(
+            Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build();
+    // private ManagedChannel channel;
+    private final Map<String, ManagedChannel> channelMap = new ConcurrentHashMap<>();
+    @Autowired
+    private PDConfig pdConfig;
+    private StoreNodeService storeNodeService;
+    private PartitionService partitionService;
+    private TaskScheduleService taskService;
+    private IdService idService;
+    private ConfigService configService;
+    private LogService logService;
+    //private LicenseVerifierService licenseVerifierService;
+    private StoreMonitorDataService storeMonitorDataService;
+    private ManagedChannel channel;
+
+    private Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) {
+        return Pdpb.ResponseHeader.newBuilder().setError(
+                Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build();
+    }
+
+    private Pdpb.ResponseHeader newErrorHeader(PDException e) {
+        return Pdpb.ResponseHeader.newBuilder().setError(
+                Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode())
+                          .setMessage(e.getMessage())).build();
+    }
+
+    public StoreNodeService getStoreNodeService() {
+        return storeNodeService;
+    }
+
+    public PartitionService getPartitionService() {
+        return partitionService;
+    }
+
+    public TaskScheduleService getTaskService() {
+        return taskService;
+    }
+
+    public ConfigService getConfigService() {
+        return configService;
+    }
+
+    public StoreMonitorDataService getStoreMonitorDataService() {
+        return this.storeMonitorDataService;
+    }
+
+    public LogService getLogService() {
+        return logService;
+    }
+
+    //public LicenseVerifierService getLicenseVerifierService() {
+    //    return licenseVerifierService;
+    //}
+
+    /**
+     * Initialization
+     */
+    @PostConstruct
+    public void init() throws PDException {
+        log.info("PDService init………… {}", pdConfig);
+        configService = new ConfigService(pdConfig);
+
+        RaftEngine.getInstance().addStateListener(this);
+        RaftEngine.getInstance().addStateListener(configService);
+        RaftEngine.getInstance().init(pdConfig.getRaft());
+        // pdConfig = configService.loadConfig(); loaded in onLeaderChanged
+        storeNodeService = new StoreNodeService(pdConfig);
+        partitionService = new PartitionService(pdConfig, storeNodeService);
+        taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService);
+        idService = new IdService(pdConfig);
+        logService = new LogService(pdConfig);
+        storeMonitorDataService = new StoreMonitorDataService(pdConfig);
+        //if (licenseVerifierService == null) {
+        //    licenseVerifierService = new LicenseVerifierService(pdConfig);
+        //}
+        RaftEngine.getInstance().addStateListener(partitionService);
+        pdConfig.setIdService(idService);
+
+        // Receive heartbeat messages
+        PDPulseSubject.listenPartitionHeartbeat(new PulseListener<PartitionHeartbeatRequest>() {
+            @Override
+            public void onNext(PartitionHeartbeatRequest request) throws Exception {
+                partitionService.partitionHeartbeat(request.getStates());
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.error("Received an error notice from pd-client", throwable);
+            }
+
+            @Override
+            public void onCompleted() {
+                log.info("Received an completed notice from pd-client");
+            }
+        });
+
+        /**
+         * Listen for partition instructions and forward them to the Store
+         */
+        partitionService.addInstructionListener(new PartitionInstructionListener() {
+            private PartitionHeartbeatResponse.Builder getBuilder(Metapb.Partition partition) throws
+                                                                                              PDException {
+                return PartitionHeartbeatResponse.newBuilder().setPartition(partition)
+                                                 .setId(idService.getId(TASK_ID_KEY, 1));
+            }
+
+            @Override
+            public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws
+                                                                                         PDException {
+                PDPulseSubject.notifyClient(getBuilder(partition).setChangeShard(changeShard));
+
+            }
+
+            @Override
+            public void transferLeader(Metapb.Partition partition,
+                                       TransferLeader transferLeader) throws
+                                                                      PDException {
+                PDPulseSubject.notifyClient(
+                        getBuilder(partition).setTransferLeader(transferLeader));
+            }
+
+            @Override
+            public void splitPartition(Metapb.Partition partition,
+                                       SplitPartition splitPartition) throws
+                                                                      PDException {
+                PDPulseSubject.notifyClient(
+                        getBuilder(partition).setSplitPartition(splitPartition));
+
+            }
+
+            @Override
+            public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws
+                                                                                            PDException {
+                PDPulseSubject.notifyClient(getBuilder(partition).setDbCompaction(dbCompaction));
+
+            }
+
+            @Override
+            public void movePartition(Metapb.Partition partition,
+                                      MovePartition movePartition) throws PDException {
+                PDPulseSubject.notifyClient(getBuilder(partition).setMovePartition(movePartition));
+            }
+
+            @Override
+            public void cleanPartition(Metapb.Partition partition,
+                                       CleanPartition cleanPartition) throws PDException {
+                PDPulseSubject.notifyClient(
+                        getBuilder(partition).setCleanPartition(cleanPartition));
+            }
+
+            @Override
+            public void changePartitionKeyRange(Metapb.Partition partition,
+                                                PartitionKeyRange partitionKeyRange)
+                    throws PDException {
+                PDPulseSubject.notifyClient(getBuilder(partition).setKeyRange(partitionKeyRange));
+            }
+        });
+
+        /**
+         * Listen for partition status change messages and forward them to the Client
+         */
+        partitionService.addStatusListener(new PartitionStatusListener() {
+            @Override
+            public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) {
+                PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER,
+                                                     partition.getGraphName(), partition.getId());
+            }
+
+            @Override
+            public void onPartitionRemoved(Metapb.Partition partition) {
+                PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.DEL,
+                                                     partition.getGraphName(),
+                                                     partition.getId());
+
+            }
+        });
+
+        storeNodeService.addShardGroupStatusListener(new ShardGroupStatusListener() {
+            @Override
+            public void onShardListChanged(Metapb.ShardGroup shardGroup,
+                                           Metapb.ShardGroup newShardGroup) {
+                // invoked before change, saved to db and update cache.
+                if (newShardGroup == null) {
+                    PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.DEL,
+                                                          shardGroup.getId(),
+                                                          shardGroup);
+                } else {
+                    PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.ALTER,
+                                                          shardGroup.getId(), newShardGroup);
+                }
+            }
+
+            @Override
+            public void onShardListOp(Metapb.ShardGroup shardGroup) {
+                PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.USER_DEFINED,
+                                                      shardGroup.getId(), shardGroup);
+            }
+        });
+
+        /**
+         * Listen for store status change messages and forward them to the Client
+         */
+        storeNodeService.addStatusListener(new StoreStatusListener() {
+
+            @Override
+            public void onStoreStatusChanged(Metapb.Store store,
+                                             Metapb.StoreState old,
+                                             Metapb.StoreState status) {
+                NodeEventType type = NodeEventType.NODE_EVENT_TYPE_UNKNOWN;
+                if (status == Metapb.StoreState.Up) {
+                    type = NodeEventType.NODE_EVENT_TYPE_NODE_ONLINE;
+                } else if (status == Metapb.StoreState.Offline) {
+                    type = NodeEventType.NODE_EVENT_TYPE_NODE_OFFLINE;
+                }
+                PDWatchSubject.notifyNodeChange(type, "", store.getId());
+            }
+
+            @Override
+            public void onGraphChange(Metapb.Graph graph,
+                                      Metapb.GraphState stateOld,
+                                      Metapb.GraphState stateNew) {
+                WatchGraphResponse wgr = WatchGraphResponse.newBuilder()
+                                                           .setGraph(graph)
+                                                           .build();
+                WatchResponse.Builder wr = WatchResponse.newBuilder()
+                                                        .setGraphResponse(wgr);
+                PDWatchSubject.notifyChange(WatchType.WATCH_TYPE_GRAPH_CHANGE,
+                                            wr);
+            }
+
+            @Override
+            public void onStoreRaftChanged(Metapb.Store store) {
+                PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_NODE_RAFT_CHANGE, "",
+                                                store.getId());
+            }
+        });
+        storeNodeService.init(partitionService);
+        partitionService.init();
+        taskService.init();
+        // log.info("init .......");
+        // licenseVerifierService.init();
+
+        // UpgradeService upgradeService = new UpgradeService(pdConfig);
+        // upgradeService.upgrade();
+    }
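+    // Flow summary for the wiring above (descriptive only): partition heartbeats arrive
+    // through PDPulseSubject; partitionService translates cluster changes into
+    // instructions (changeShard, transferLeader, splitPartition, dbCompaction, ...),
+    // and each instruction is pushed back to clients via PDPulseSubject.notifyClient()
+    // tagged with a fresh task id from idService.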
+
+    /**
+     * <pre>
+     * Register a store. The first registration generates a new store_id;
+     * the store_id is the unique identifier of a store.
+     * </pre>
+     */
+    @Override
+    public void registerStore(Pdpb.RegisterStoreRequest request,
+                              io.grpc.stub.StreamObserver<Pdpb.RegisterStoreResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getRegisterStoreMethod(), request, observer);
+            return;
+        }
+        Pdpb.RegisterStoreResponse response = null;
+        try {
+            Metapb.Store store = storeNodeService.register(request.getStore());
+            response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(okHeader)
+                                                 .setStoreId(store.getId())
+                                                 .build();
+        } catch (PDException e) {
+            response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("registerStore exception: ", e);
+        }
+        // Pull all partition info and return it
+        observer.onNext(response);
+        observer.onCompleted();
+
+    }
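+    // Caller-side sketch for the gRPC entry above (illustrative only; uses the standard
+    // grpc-java generated blocking stub, the channel/store values are assumptions):
+    //
+    //   PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel);
+    //   Pdpb.RegisterStoreResponse resp = stub.registerStore(
+    //           Pdpb.RegisterStoreRequest.newBuilder().setStore(store).build());
+    //   long storeId = resp.getStoreId(); // generated on first registration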
+
+    /**
+     * Find the store by store_id
+     */
+    @Override
+    public void getStore(Pdpb.GetStoreRequest request,
+                         io.grpc.stub.StreamObserver<Pdpb.GetStoreResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetStoreMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetStoreResponse response = null;
+        try {
+            Metapb.Store store = storeNodeService.getStore(request.getStoreId());
+            response =
+                    Pdpb.GetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build();
+        } catch (PDException e) {
+            response = Pdpb.GetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("{} getStore exception: {}", StreamObserverUtil.getRemoteIP(observer), e);
+        }
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * <pre>
+     * Update the store's state and other information.
+     * </pre>
+     */
+    @Override
+    public void setStore(Pdpb.SetStoreRequest request,
+                         StreamObserver<Pdpb.SetStoreResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getSetStoreMethod(), request, observer);
+            return;
+        }
+        Pdpb.SetStoreResponse response = null;
+        try {
+            Metapb.StoreState state = request.getStore().getState();
+            Long storeId = request.getStore().getId();
+            // Only a store in the Pending state can be brought online
+            Metapb.Store lastStore = storeNodeService.getStore(request.getStore().getId());
+            if (lastStore == null) {
+                // The storeId does not exist, throw an exception
+                throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE,
+                                      String.format("Store id %d does not exist!", storeId));
+            }
+            if (Metapb.StoreState.Up.equals(state)) {
+                if (!Metapb.StoreState.Pending.equals(lastStore.getState())) {
+                    throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE,
+                                          "only stores in Pending state can be set to Up!");
+                }
+            }
+            if (state.equals(Metapb.StoreState.Offline)) {
+                Metapb.ClusterStats stats = storeNodeService.getClusterStats();
+                if (stats.getState() != Metapb.ClusterState.Cluster_OK) {
+                    Pdpb.ResponseHeader errorHeader = newErrorHeader(
+                            -1, "cannot take the node offline when the cluster " +
+                                "state is not normal");
+                    response = Pdpb.SetStoreResponse.newBuilder().setHeader(errorHeader).build();
+                    observer.onNext(response);
+                    observer.onCompleted();
+                    return;
+                }
+            }
+            logService.insertLog(LogService.NODE_CHANGE, LogService.GRPC, request.getStore());
+            // If the check fails, set the state back to Pending and return the error cause
+            if (state.equals(Metapb.StoreState.Up)) {
+                int cores = 0;
+                long id = request.getStore().getId();
+                List<Metapb.Store> stores = storeNodeService.getStores();
+                int nodeCount = 0;
+                for (Metapb.Store store : stores) {
+                    if (store.getId() == id) {
+                        // Use the cores reported by the previously registered store for
+                        // verification
+                        cores = store.getCores();
+                    }
+                    if (store.getState().equals(Metapb.StoreState.Up)) {
+                        nodeCount++;
+                    }
+                }
+                try {
+                    //licenseVerifierService.verify(cores, nodeCount);
+                } catch (Exception e) {
+                    Metapb.Store store = Metapb.Store.newBuilder(request.getStore())
+                                                     .setState(Metapb.StoreState.Pending).build();
+                    storeNodeService.updateStore(store);
+                    throw new PDException(Pdpb.ErrorType.LICENSE_ERROR_VALUE,
+                                          "check license failed: " + e.getMessage()
+                                          + ", and the node state was changed to 'Pending'");
+                }
+            }
+            Metapb.Store store = request.getStore();
+            // Before taking a store offline, check that the number of active machines
+            // stays above the minimum threshold
+            if (state.equals(Metapb.StoreState.Tombstone)) {
+                List<Metapb.Store> activeStores = storeNodeService.getActiveStores();
+                if (lastStore.getState() == Metapb.StoreState.Up
+                    && activeStores.size() - 1 < pdConfig.getMinStoreCount()) {
+                    throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE,
+                                          "The number of active stores is less then " +
+                                          pdConfig.getMinStoreCount());
+                }
+                if (!storeNodeService.checkStoreCanOffline(request.getStore())) {
+                    throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE,
+                                          "check activeStores or online shardsList size");
+                }
+                if (lastStore.getState() == Metapb.StoreState.Exiting) {
+                    // Already in the process of going offline, no further handling needed
+                    throw new PDException(Pdpb.ErrorType.Store_Tombstone_Doing_VALUE,
+                                          "Downline is in progress, do not resubmit");
+                }
+                Map<String, Object> resultMap = taskService.canAllPartitionsMovedOut(lastStore);
+                if ((boolean) resultMap.get("flag")) {
+                    if (resultMap.get("current_store_is_online") != null
+                        && (boolean) resultMap.get("current_store_is_online")) {
+                        log.info("updateStore removeActiveStores store {}", store.getId());
+                        // Set the online store's state to Exiting and wait for replica
+                        // migration
+                        store = Metapb.Store.newBuilder(lastStore)
+                                            .setState(Metapb.StoreState.Exiting).build();
+                        // Perform the partition migration
+                        taskService.movePartitions((Map<Integer, KVPair<Long, Long>>) resultMap.get(
+                                "movedPartitions"));
+                    } else {
+                        // The store is already offline, skip replica migration
+                        // and just change the state to Tombstone
+                    }
+                } else {
+                    throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE,
+                                          "the resources on other stores may be not enough to " +
+                                          "store " +
+                                          "the partitions of current store!");
+                }
+            }
+            // License replacement always goes through gRPC
+            store = storeNodeService.updateStore(store);
+            response =
+                    Pdpb.SetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build();
+        } catch (PDException e) {
+            response = Pdpb.SetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("setStore exception: ", e);
+        }
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
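+    // Store state transitions enforced above (summary of the checks in setStore):
+    //   Pending -> Up        : allowed (after the license/cores verification)
+    //   other   -> Up        : rejected
+    //   Up      -> Offline   : only while the cluster state is Cluster_OK
+    //   Up      -> Tombstone : switched to Exiting first, partitions are moved out
+    //   Exiting -> Tombstone : rejected while the offline process is still running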
+
+    /**
+     * Return all stores; when exclude_offline_stores=true, return only active stores
+     */
+    @Override
+    public void getAllStores(Pdpb.GetAllStoresRequest request,
+                             io.grpc.stub.StreamObserver<Pdpb.GetAllStoresResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetAllStoresMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetAllStoresResponse response = null;
+        try {
+            List<Metapb.Store> stores = null;
+            if (request.getExcludeOfflineStores()) {
+                stores = storeNodeService.getActiveStores(request.getGraphName());
+            } else {
+                stores = storeNodeService.getStores(request.getGraphName());
+            }
+            response =
+                    Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores)
+                                             .build();
+        } catch (PDException e) {
+            response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("getAllStores exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Handle store heartbeats
+     */
+    @Override
+    public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request,
+                               io.grpc.stub.StreamObserver<Pdpb.StoreHeartbeatResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getStoreHeartbeatMethod(), request, observer);
+            return;
+        }
+
+        Metapb.StoreStats stats = request.getStats();
+
+        // save monitor data when monitor data enabled
+        if (this.pdConfig.getStore().isMonitorDataEnabled()) {
+            try {
+                storeMonitorDataService.saveMonitorData(stats);
+            } catch (PDException e) {
+                log.error("save status failed, state:{}", stats);
+            }
+            // remove system_metrics
+            stats = Metapb.StoreStats.newBuilder()
+                                     .mergeFrom(request.getStats())
+                                     .clearField(Metapb.StoreStats.getDescriptor().findFieldByName(
+                                             "system_metrics"))
+                                     .build();
+        }
+
+        Pdpb.StoreHeartbeatResponse response = null;
+        try {
+            Metapb.ClusterStats clusterStats = storeNodeService.heartBeat(stats);
+            response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(okHeader)
+                                                  .setClusterStats(clusterStats).build();
+        } catch (PDException e) {
+            response =
+                    Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("storeHeartbeat exception: ", e);
+        } catch (Exception e2) {
+            response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(
+                    newErrorHeader(Pdpb.ErrorType.UNKNOWN_VALUE, e2.getMessage())).build();
+            log.error("storeHeartbeat exception: ", e2);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
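+    // Note (descriptive only): when monitor data is enabled, the heavy system_metrics
+    // field is persisted separately and then stripped from the stats before the in-memory
+    // heartbeat handling, keeping the cluster-state path lightweight.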
+
+    /**
+     * <pre>
+     * Find the partition that the key belongs to
+     * </pre>
+     */
+    @Override
+    public void getPartition(Pdpb.GetPartitionRequest request,
+                             io.grpc.stub.StreamObserver<Pdpb.GetPartitionResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetPartitionMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetPartitionResponse response = null;
+        try {
+            Metapb.PartitionShard partShard =
+                    partitionService.getPartitionShard(request.getGraphName(),
+                                                       request.getKey()
+                                                              .toByteArray());
+            response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader)
+                                                .setPartition(partShard.getPartition())
+                                                .setLeader(partShard.getLeader()).build();
+        } catch (PDException e) {
+            response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("getPartition exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * <pre>
+     * Find the partition that the hash code belongs to
+     * </pre>
+     */
+    @Override
+    public void getPartitionByCode(Pdpb.GetPartitionByCodeRequest request,
+                                   io.grpc.stub.StreamObserver<Pdpb.GetPartitionResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetPartitionByCodeMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetPartitionResponse response = null;
+        try {
+            Metapb.PartitionShard partShard =
+                    partitionService.getPartitionByCode(request.getGraphName(),
+                                                        request.getCode());
+            response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader)
+                                                .setPartition(partShard.getPartition())
+                                                .setLeader(partShard.getLeader()).build();
+        } catch (PDException e) {
+            response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("getPartitionByCode exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Find the partition by partition_id
+     */
+    @Override
+    public void getPartitionByID(Pdpb.GetPartitionByIDRequest request,
+                                 io.grpc.stub.StreamObserver<Pdpb.GetPartitionResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetPartitionByIDMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetPartitionResponse response = null;
+        try {
+            Metapb.PartitionShard partShard =
+                    partitionService.getPartitionShardById(request.getGraphName(),
+                                                           request.getPartitionId());
+            if (partShard == null) {
+                throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE,
+                                      String.format("partition: %s-%s not found",
+                                                    request.getGraphName(),
+                                                    request.getPartitionId()));
+            }
+            response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader)
+                                                .setPartition(partShard.getPartition())
+                                                .setLeader(partShard.getLeader()).build();
+        } catch (PDException e) {
+            response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("getPartitionByID exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * <pre>
+     * Update partition info, mainly the partition key range. Call this interface with
+     * caution, otherwise it will cause data loss.
+     * </pre>
+     */
+    @Override
+    public void updatePartition(Pdpb.UpdatePartitionRequest request,
+                                io.grpc.stub.StreamObserver<Pdpb.UpdatePartitionResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getUpdatePartitionMethod(), request, observer);
+            return;
+        }
+        Pdpb.UpdatePartitionResponse response = null;
+        try {
+            partitionService.updatePartition(request.getPartitionList());
+            response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(okHeader).build();
+
+        } catch (PDException e) {
+            response =
+                    Pdpb.UpdatePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("update partition exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Delete the partition by partition_id
+     */
+    @Override
+    public void delPartition(Pdpb.DelPartitionRequest request,
+                             io.grpc.stub.StreamObserver<Pdpb.DelPartitionResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getDelPartitionMethod(), request, observer);
+            return;
+        }
+        Pdpb.DelPartitionResponse response = null;
+        try {
+            Metapb.Partition partition = partitionService.getPartitionById(request.getGraphName(),
+                                                                           request.getPartitionId());
+            if (partition != null) {
+                partitionService.removePartition(request.getGraphName(),
+                                                 request.getPartitionId());
+                response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader)
+                                                    .setPartition(partition)
+                                                    .build();
+            } else {
+                response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader).build();
+            }
+        } catch (PDException e) {
+            response = Pdpb.DelPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("delPartition exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Find the set of partitions covering the given key range
+     */
+    @Override
+    public void scanPartitions(Pdpb.ScanPartitionsRequest request,
+                               io.grpc.stub.StreamObserver<Pdpb.ScanPartitionsResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getScanPartitionsMethod(), request, observer);
+            return;
+        }
+        Pdpb.ScanPartitionsResponse response = null;
+        try {
+            List<Metapb.PartitionShard> partShards =
+                    partitionService.scanPartitions(request.getGraphName(),
+                                                    request.getStartKey()
+                                                           .toByteArray(),
+                                                    request.getEndKey()
+                                                           .toByteArray());
+            response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(okHeader)
+                                                  .addAllPartitions(partShards).build();
+        } catch (PDException e) {
+            response =
+                    Pdpb.ScanPartitionsResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("scanPartitions exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Get graph info
+     */
+    @Override
+    public void getGraph(GetGraphRequest request,
+                         io.grpc.stub.StreamObserver<Pdpb.GetGraphResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetGraphMethod(), request, observer);
+            return;
+        }
+
+        Pdpb.GetGraphResponse response = null;
+        String graphName = request.getGraphName();
+        try {
+            Metapb.Graph graph = partitionService.getGraph(graphName);
+            if (graph != null) {
+                response = Pdpb.GetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph)
+                                                .build();
+            } else {
+                Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(
+                        Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.NOT_FOUND).build()).build();
+                response = Pdpb.GetGraphResponse.newBuilder().setHeader(header).build();
+            }
+        } catch (PDException e) {
+            response = Pdpb.GetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("getGraph exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Update graph info
+     */
+    @Override
+    public void setGraph(Pdpb.SetGraphRequest request,
+                         io.grpc.stub.StreamObserver<Pdpb.SetGraphResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getSetGraphMethod(), request, observer);
+            return;
+        }
+        Pdpb.SetGraphResponse response = null;
+        Metapb.Graph graph = request.getGraph();
+        try {
+            graph = partitionService.updateGraph(graph);
+            response =
+                    Pdpb.SetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build();
+        } catch (PDException e) {
+            log.error("setGraph exception: ", e);
+            response = Pdpb.SetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Delete graph info
+     */
+    @Override
+    public void delGraph(Pdpb.DelGraphRequest request,
+                         io.grpc.stub.StreamObserver<Pdpb.DelGraphResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getDelGraphMethod(), request, observer);
+            return;
+        }
+
+        Pdpb.DelGraphResponse response = null;
+        String graphName = request.getGraphName();
+        try {
+            Metapb.Graph graph = partitionService.delGraph(graphName);
+            if (graph != null) {
+                response = Pdpb.DelGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph)
+                                                .build();
+            }
+        } catch (PDException e) {
+            response = Pdpb.DelGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("getGraph exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * <pre>
+     * Query partition info by conditions, such as Store and Graph
+     * </pre>
+     */
+    @Override
+    public void queryPartitions(Pdpb.QueryPartitionsRequest request,
+                                io.grpc.stub.StreamObserver<Pdpb.QueryPartitionsResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getQueryPartitionsMethod(), request, observer);
+            return;
+        }
+        // TODO: temporary traversal-based implementation; switch to a KV-index lookup
+        //  once RocksDB storage is used
+        Metapb.PartitionQuery query = request.getQuery();
+        List<Metapb.Partition> partitions = partitionService.getPartitions(query.getGraphName());
+        List<Metapb.Partition> result = new ArrayList<>();
+        if (!CollectionUtils.isEmpty(partitions)) {
+            for (Metapb.Partition partition : partitions) {
+                if (query.hasPartitionId() && partition.getId() != query.getPartitionId()) {
+                    continue;
+                }
+                if (query.hasGraphName() &&
+                    !partition.getGraphName().equals(query.getGraphName())) {
+                    continue;
+                }
+                long storeId = query.getStoreId();
+                if (query.hasStoreId() && query.getStoreId() != 0) {
+                    try {
+                        storeNodeService.getShardGroup(partition.getId()).getShardsList()
+                                        .forEach(shard -> {
+                                            if (shard.getStoreId() == storeId) {
+                                                result.add(partition);
+                                            }
+                                        });
+                    } catch (PDException e) {
+                        log.error("query partitions error, req:{}, error:{}", request,
+                                  e.getMessage());
+                    }
+                } else {
+                    result.add(partition);
+                }
+            }
+        }
+        Pdpb.QueryPartitionsResponse response = Pdpb.QueryPartitionsResponse.newBuilder()
+                                                                            .setHeader(okHeader)
+                                                                            .addAllPartitions(
+                                                                                    result).build();
+        observer.onNext(response);
+        observer.onCompleted();
+
+    }
+
+    @Override
+    public void getId(Pdpb.GetIdRequest request,
+                      StreamObserver<Pdpb.GetIdResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetIdMethod(), request, responseObserver);
+            return;
+        }
+        long id = 0L;
+        try {
+            id = idService.getId(request.getKey(), request.getDelta());
+        } catch (PDException e) {
+            responseObserver.onError(e);
+            log.error("getId exception: ", e);
+            return;
+        }
+        Pdpb.GetIdResponse response =
+                Pdpb.GetIdResponse.newBuilder().setId(id).setDelta(request.getDelta())
+                                  .build();
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    @Override
+    public void resetId(Pdpb.ResetIdRequest request,
+                        StreamObserver<Pdpb.ResetIdResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getResetIdMethod(), request, responseObserver);
+            return;
+        }
+        try {
+            idService.resetId(request.getKey());
+        } catch (PDException e) {
+            responseObserver.onError(e);
+            log.error("getId exception: ", e);
+            return;
+        }
+        Pdpb.ResetIdResponse response = Pdpb.ResetIdResponse.newBuilder().setResult(0).build();
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    /**
+     * Get cluster member info
+     */
+    @Override
+    public void getMembers(Pdpb.GetMembersRequest request,
+                           io.grpc.stub.StreamObserver<Pdpb.GetMembersResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetMembersMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetMembersResponse response;
+        try {
+            response = Pdpb.GetMembersResponse.newBuilder()
+                                              .addAllMembers(RaftEngine.getInstance().getMembers())
+                                              .setLeader(RaftEngine.getInstance().getLocalMember())
+                                              .build();
+
+        } catch (Exception e) {
+            log.error("getMembers exception: ", e);
+            response = Pdpb.GetMembersResponse.newBuilder()
+                                              .setHeader(newErrorHeader(-1, e.getMessage()))
+                                              .build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void getStoreStatus(Pdpb.GetAllStoresRequest request,
+                               io.grpc.stub.StreamObserver<Pdpb.GetAllStoresResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetStoreStatusMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetAllStoresResponse response = null;
+        try {
+            List<Metapb.Store> stores = null;
+            stores = storeNodeService.getStoreStatus(request.getExcludeOfflineStores());
+            response =
+                    Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores)
+                                             .build();
+        } catch (PDException e) {
+            response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            log.error("getAllStores exception: ", e);
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Read the PD config
+     */
+    @Override
+    public void getPDConfig(Pdpb.GetPDConfigRequest request,
+                            io.grpc.stub.StreamObserver<Pdpb.GetPDConfigResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetPDConfigMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetPDConfigResponse response = null;
+        try {
+            Metapb.PDConfig pdConfig = configService.getPDConfig(request.getVersion());
+            response =
+                    Pdpb.GetPDConfigResponse.newBuilder().setHeader(okHeader).setPdConfig(pdConfig)
+                                            .build();
+        } catch (PDException e) {
+            response = Pdpb.GetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Update the PD config
+     */
+    @Override
+    public void setPDConfig(Pdpb.SetPDConfigRequest request,
+                            io.grpc.stub.StreamObserver<Pdpb.SetPDConfigResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getSetPDConfigMethod(), request, observer);
+            return;
+        }
+        Pdpb.SetPDConfigResponse response = null;
+        try {
+            if (request.getPdConfig().getShardCount() % 2 != 1) {
+                // Parity check: the replica count must be odd
+                throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE,
+                                      "shard count must be an odd number!");
+            }
+            if (request.getPdConfig().getShardCount() >
+                storeNodeService.getActiveStores().size()) {
+                // Must not exceed the number of active stores
+                throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE,
+                                      "shard count can't be greater than the number of active " +
+                                      "stores!");
+            }
+            int oldShardCount = configService.getPDConfig().getShardCount();
+            int newShardCount = request.getPdConfig().getShardCount();
+            if (newShardCount > oldShardCount) {
+                // If the replica count increases, check whether the stores have enough
+                // resources
+                if (!isResourceEnough(oldShardCount, newShardCount)) {
+                    throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE,
+                                          "There is not enough disk space left!");
+                }
+
+                if (!checkShardCount(newShardCount)) {
+                    throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE,
+                                          "the cluster can't support so many shard count!");
+                }
+            }
+            configService.setPDConfig(request.getPdConfig());
+            response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Read the graph space configuration
+     */
+    @Override
+    public void getGraphSpace(Pdpb.GetGraphSpaceRequest request,
+                              io.grpc.stub.StreamObserver<Pdpb.GetGraphSpaceResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetGraphSpaceMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetGraphSpaceResponse response;
+        try {
+            List<Metapb.GraphSpace> graphSpaces =
+                    configService.getGraphSpace(request.getGraphSpaceName());
+            response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(okHeader)
+                                                 .addAllGraphSpace(graphSpaces).build();
+        } catch (PDException e) {
+            response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Update the graph space configuration
+     */
+    @Override
+    public void setGraphSpace(Pdpb.SetGraphSpaceRequest request,
+                              io.grpc.stub.StreamObserver<Pdpb.SetGraphSpaceResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getSetGraphSpaceMethod(), request, observer);
+            return;
+        }
+        Pdpb.SetGraphSpaceResponse response;
+        try {
+            configService.setGraphSpace(request.getGraphSpace());
+            response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * <pre>
+     * Split the partition data
+     * </pre>
+     */
+    @Override
+    public void splitData(Pdpb.SplitDataRequest request,
+                          StreamObserver<Pdpb.SplitDataResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getSplitDataMethod(), request, observer);
+            return;
+        }
+        logService.insertLog(LogService.PARTITION_CHANGE, "splitData", request);
+        Pdpb.SplitDataResponse response;
+        try {
+            taskService.splitPartition(request.getMode(), request.getParamList());
+            response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("splitData exception: ", e);
+            response = Pdpb.SplitDataResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void splitGraphData(Pdpb.SplitGraphDataRequest request,
+                               StreamObserver<Pdpb.SplitDataResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getSplitGraphDataMethod(), request, observer);
+            return;
+        }
+        logService.insertLog(LogService.PARTITION_CHANGE, "splitGraphData", request);
+        Pdpb.SplitDataResponse response;
+        try {
+            partitionService.splitPartition(partitionService.getGraph(request.getGraphName()),
+                                            request.getToCount());
+            response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("splitGraphData exception {}", e);
+            response = Pdpb.SplitDataResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Balance data across stores
+     */
+    @Override
+    public void movePartition(Pdpb.MovePartitionRequest request,
+                              StreamObserver<Pdpb.MovePartitionResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getMovePartitionMethod(), request, observer);
+            return;
+        }
+        logService.insertLog(LogService.PARTITION_CHANGE, "balanceData", request);
+        Pdpb.MovePartitionResponse response;
+        try {
+            taskService.patrolPartitions();
+            taskService.balancePartitionShard();
+            response = Pdpb.MovePartitionResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("movePartition exception: ", e);
+            response = Pdpb.MovePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * <pre>
+     * Get the cluster health status
+     * </pre>
+     */
+    @Override
+    public void getClusterStats(Pdpb.GetClusterStatsRequest request,
+                                io.grpc.stub.StreamObserver<Pdpb.GetClusterStatsResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetClusterStatsMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetClusterStatsResponse response =
+                Pdpb.GetClusterStatsResponse.newBuilder().setHeader(okHeader)
+                                            .setCluster(storeNodeService.getClusterStats())
+                                            .build();
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * <pre>
+     * Report the execution results of tasks such as partition splits
+     * </pre>
+     */
+    @Override
+    public void reportTask(Pdpb.ReportTaskRequest request,
+                           io.grpc.stub.StreamObserver<Pdpb.ReportTaskResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getReportTaskMethod(), request, observer);
+            return;
+        }
+        try {
+            taskService.reportTask(request.getTask());
+        } catch (Exception e) {
+            log.error("PDService.reportTask {}", e);
+        }
+        Pdpb.ReportTaskResponse response =
+                Pdpb.ReportTaskResponse.newBuilder().setHeader(okHeader).build();
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Get the statistics of the specified partition
+     */
+    @Override
+    public void getPartitionStats(Pdpb.GetPartitionStatsRequest request,
+                                  io.grpc.stub.StreamObserver<Pdpb.GetPartitionStatsResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetPartitionStatsMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetPartitionStatsResponse response;
+        // TODO
+        try {
+            Metapb.PartitionStats stats = partitionService.getPartitionStats(request.getGraphName(),
+                                                                             request.getPartitionId());
+            response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(okHeader)
+                                                     .setPartitionStats(stats).build();
+        } catch (PDException e) {
+            log.error("getPartitionStats exception {}", e);
+            response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(newErrorHeader(e))
+                                                     .build();
+        }
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public boolean isLeader() {
+        return RaftEngine.getInstance().isLeader();
+    }
+
+    //private <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> void redirectToLeader(
+    //        MethodDescriptor<ReqT, RespT> method, ReqT req, io.grpc.stub.StreamObserver<RespT>
+    //        observer) {
+    //    try {
+    //        var addr = RaftEngine.getInstance().getLeaderGrpcAddress();
+    //        ManagedChannel channel;
+    //
+    //        if ((channel = channelMap.get(addr)) == null) {
+    //            synchronized (this) {
+    //                if ((channel = channelMap.get(addr)) == null|| channel.isShutdown()) {
+    //                    channel = ManagedChannelBuilder
+    //                            .forTarget(addr).usePlaintext()
+    //                            .build();
+    //                }
+    //            }
+    //            log.info("Grpc get leader address {}", RaftEngine.getInstance()
+    //            .getLeaderGrpcAddress());
+    //        }
+    //
+    //        io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions
+    //        .DEFAULT), req,
+    //                                                observer);
+    //    } catch (Exception e) {
+    //        e.printStackTrace();
+    //    }
+    //}
+
+    /**
+     * Update the raft peer list
+     */
+    @Override
+    public void changePeerList(Pdpb.ChangePeerListRequest request,
+                               io.grpc.stub.StreamObserver<Pdpb.getChangePeerListResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getChangePeerListMethod(), request, observer);
+            return;
+        }
+        Pdpb.getChangePeerListResponse response;
+        try {
+            Status status = RaftEngine.getInstance().changePeerList(request.getPeerList());
+            Pdpb.ResponseHeader responseHeader =
+                    status.isOk() ? okHeader : newErrorHeader(status.getCode(),
+                                                              status.getErrorMsg());
+            response =
+                    Pdpb.getChangePeerListResponse.newBuilder().setHeader(responseHeader).build();
+
+        } catch (Exception e) {
+            log.error("changePeerList exception: ", e);
+            response = Pdpb.getChangePeerListResponse.newBuilder()
+                                                     .setHeader(newErrorHeader(-1, e.getMessage()))
+                                                     .build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public synchronized void onRaftLeaderChanged() {
+        log.info("onLeaderChanged");
+        // channel = null;
+        // TODO: uncomment later
+        //if (licenseVerifierService == null) {
+        //    licenseVerifierService = new LicenseVerifierService(pdConfig);
+        //}
+        //licenseVerifierService.init();
+
+        try {
+            PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_PD_LEADER_CHANGE,
+                                            RaftEngine.getInstance().getLeaderGrpcAddress(), 0L);
+        } catch (ExecutionException | InterruptedException e) {
+            log.error("failed to notice client", e);
+        }
+    }
+
+    @Override
+    public void balanceLeaders(Pdpb.BalanceLeadersRequest request,
+                               StreamObserver<Pdpb.BalanceLeadersResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getBalanceLeadersMethod(), request, observer);
+            return;
+        }
+        logService.insertLog(LogService.PARTITION_CHANGE, "balanceLeaders", request);
+        Pdpb.BalanceLeadersResponse response;
+        try {
+            taskService.balancePartitionLeader(true);
+            response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("balanceLeaders exception: ", e);
+            response =
+                    Pdpb.BalanceLeadersResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void putLicense(PutLicenseRequest request,
+                           StreamObserver<PutLicenseResponse> responseObserver) {
+        PutLicenseResponse response = null;
+        boolean moved = false;
+        String bakPath = pdConfig.getLicensePath() + "-bak";
+        File bakFile = new File(bakPath);
+        File licenseFile = new File(pdConfig.getLicensePath());
+        try {
+            byte[] content = request.getContent().toByteArray();
+            if (licenseFile.exists()) {
+                if (bakFile.exists()) {
+                    FileUtils.deleteQuietly(bakFile);
+                }
+                FileUtils.moveFile(licenseFile, bakFile);
+                moved = true;
+            }
+            FileUtils.writeByteArrayToFile(licenseFile, content, false);
+            response = Pdpb.PutLicenseResponse.newBuilder().setHeader(okHeader).build();
+        } catch (Exception e) {
+            log.error("putLicense with error: ", e);
+            if (moved) {
+                try {
+                    FileUtils.moveFile(bakFile, licenseFile);
+                } catch (IOException ex) {
+                    log.error("failed to restore the license file.{}", ex);
+                }
+            }
+            Pdpb.ResponseHeader header =
+                    newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage());
+            response = Pdpb.PutLicenseResponse.newBuilder().setHeader(header).build();
+        }
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    @Override
+    public void delStore(Pdpb.DetStoreRequest request,
+                         StreamObserver<Pdpb.DetStoreResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getDelStoreMethod(), request, observer);
+            return;
+        }
+        long storeId = request.getStoreId();
+        Pdpb.DetStoreResponse response;
+        try {
+            Metapb.Store store = storeNodeService.getStore(storeId);
+            if (Metapb.StoreState.Tombstone == store.getState()) {
+                // only stores that have been taken offline (Tombstone) can be deleted
+                storeNodeService.removeStore(storeId);
+                response = Pdpb.DetStoreResponse.newBuilder()
+                                                .setHeader(okHeader)
+                                                .setStore(store)
+                                                .build();
+            } else {
+                throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DELETION_VALUE,
+                                      "the store can't be deleted, please check store state!");
+            }
+        } catch (PDException e) {
+            log.error("delete store exception: {}", e);
+            response = Pdpb.DetStoreResponse.newBuilder()
+                                            .setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * check whether the new shard count exceeds the cluster's max shard count
+     *
+     * @param newShardCount new shard count
+     * @return true if the new shard count can be applied, otherwise false
+     */
+    private boolean checkShardCount(int newShardCount) {
+        try {
+            var maxCount = pdConfig.getPartition().getMaxShardsPerStore() *
+                           storeNodeService.getActiveStores().size() /
+                           pdConfig.getConfigService().getPartitionCount();
+
+            if (newShardCount > maxCount) {
+                log.error("new shard count :{} exceed current cluster max shard count {}",
+                          newShardCount, maxCount);
+                return false;
+            }
+        } catch (Exception e) {
+            log.error("checkShardCount: {}", e.getMessage());
+        }
+        return true;
+    }
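+
+    // Illustrative sizing (hypothetical numbers): with maxShardsPerStore = 12,
+    // 3 active stores and partitionCount = 12, the cluster supports at most
+    // 12 * 3 / 12 = 3 shards per group, so a requested shardCount of 5 is rejected.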
+
+    /**
+     * Check whether the stores have enough resources
+     */
+    public boolean isResourceEnough(int oldShardCount, int newShardCount) {
+        // check whether the active stores have enough resources
+        try {
+            // the factor by which the occupied storage space will grow
+            float expansionRatio = (float) newShardCount / oldShardCount;
+            // the currently occupied space
+            long currentDataSize = 0L;
+            // the space occupied after data expansion
+            long newDataSize = 0L;
+            // the total available space
+            long totalAvailable = 0L;
+            // count the currently occupied storage space
+            for (Metapb.Store store : storeNodeService.getStores()) {
+                List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+                for (Metapb.GraphStats graphStats : graphStatsList) {
+                    currentDataSize += graphStats.getApproximateSize();
+                }
+            }
+            // estimate the storage space occupied after the data expansion
+            newDataSize = (long) Math.ceil(currentDataSize * expansionRatio);
+            // count the available space of all active stores
+            List<Metapb.Store> activeStores = storeNodeService.getActiveStores();
+            for (Metapb.Store store : activeStores) {
+                Metapb.StoreStats storeStats = store.getStats();
+                totalAvailable += storeStats.getAvailable();
+            }
+            // assuming partitions are evenly distributed, check whether resources suffice
+            return totalAvailable > newDataSize - currentDataSize;
+        } catch (PDException e) {
+            log.error("isResourceEnough exception: ", e);
+            return false;
+        }
+    }
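+
+    // Worked example (hypothetical numbers): growing from 1 to 3 shards with a
+    // current data size of 100 GB expands the estimate to 300 GB, so the active
+    // stores together need more than 200 GB of free space for the check to pass.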
+
+    /**
+     * <pre>
+     * Perform a compaction on RocksDB
+     * </pre>
+     */
+    @Override
+    public void dbCompaction(Pdpb.DbCompactionRequest request,
+                             StreamObserver<Pdpb.DbCompactionResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getDbCompactionMethod(), request, observer);
+            return;
+        }
+        logService.insertLog(LogService.TASK, "dbCompaction", request);
+        Pdpb.DbCompactionResponse response;
+        try {
+            log.info("dbCompaction called, table: {}", request.getTableName());
+            taskService.dbCompaction(request.getTableName());
+            response = Pdpb.DbCompactionResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("dbCompaction exception: ", e);
+            response = Pdpb.DbCompactionResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void combineCluster(Pdpb.CombineClusterRequest request,
+                               StreamObserver<Pdpb.CombineClusterResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getCombineClusterMethod(), request, observer);
+            return;
+        }
+
+        Pdpb.CombineClusterResponse response;
+
+        try {
+            partitionService.combinePartition(request.getToCount());
+            response = Pdpb.CombineClusterResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            response =
+                    Pdpb.CombineClusterResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void combineGraph(Pdpb.CombineGraphRequest request,
+                             StreamObserver<Pdpb.CombineGraphResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getCombineGraphMethod(), request, observer);
+            return;
+        }
+
+        Pdpb.CombineGraphResponse response;
+
+        try {
+            partitionService.combineGraphPartition(request.getGraphName(), request.getToCount());
+            response = Pdpb.CombineGraphResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            response = Pdpb.CombineGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void deleteShardGroup(Pdpb.DeleteShardGroupRequest request,
+                                 StreamObserver<Pdpb.DeleteShardGroupResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getDeleteShardGroupMethod(), request, observer);
+            return;
+        }
+
+        Pdpb.DeleteShardGroupResponse response;
+
+        try {
+            storeNodeService.deleteShardGroup(request.getGroupId());
+            response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            response =
+                    Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void getShardGroup(Pdpb.GetShardGroupRequest request,
+                              io.grpc.stub.StreamObserver<Pdpb.GetShardGroupResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetShardGroupMethod(), request, observer);
+            return;
+        }
+        Pdpb.GetShardGroupResponse response;
+        // TODO
+        try {
+            Metapb.ShardGroup shardGroup = storeNodeService.getShardGroup(request.getGroupId());
+            response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(okHeader)
+                                                 .setShardGroup(shardGroup).build();
+        } catch (PDException e) {
+            log.error("getPartitionStats exception", e);
+            response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void updateShardGroup(Pdpb.UpdateShardGroupRequest request,
+                                 StreamObserver<Pdpb.UpdateShardGroupResponse> responseObserver) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getUpdateShardGroupMethod(), request, responseObserver);
+            return;
+        }
+        Pdpb.UpdateShardGroupResponse response;
+
+        try {
+            var group = request.getShardGroup();
+            storeNodeService.updateShardGroup(group.getId(), group.getShardsList(),
+                                              group.getVersion(), group.getConfVer());
+            response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("updateShardGroup exception, ", e);
+            response =
+                    Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }
+
+    @Override
+    public void updateShardGroupOp(Pdpb.ChangeShardRequest request,
+                                   StreamObserver<Pdpb.ChangeShardResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getUpdateShardGroupOpMethod(), request, observer);
+            return;
+        }
+
+        Pdpb.ChangeShardResponse response;
+
+        try {
+            storeNodeService.shardGroupOp(request.getGroupId(), request.getShardsList());
+            response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("changeShard exception, ", e);
+            response = Pdpb.ChangeShardResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void changeShard(Pdpb.ChangeShardRequest request,
+                            StreamObserver<Pdpb.ChangeShardResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getChangeShardMethod(), request, observer);
+            return;
+        }
+
+        Pdpb.ChangeShardResponse response;
+
+        try {
+            partitionService.changeShard(request.getGroupId(), request.getShardsList());
+            response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("changeShard exception, ", e);
+            response = Pdpb.ChangeShardResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    @Override
+    public void updatePdRaft(Pdpb.UpdatePdRaftRequest request,
+                             StreamObserver<Pdpb.UpdatePdRaftResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getUpdatePdRaftMethod(), request, observer);
+            return;
+        }
+
+        var list = parseConfig(request.getConfig());
+
+        log.info("update raft request: {}, list: {}", request.getConfig(), list);
+
+        Pdpb.UpdatePdRaftResponse response =
+                Pdpb.UpdatePdRaftResponse.newBuilder().setHeader(okHeader).build();
+
+        // single-pass block: 'break' skips the remaining raft update steps
+        do {
+            var leaders = list.stream().filter(s -> s.getKey().equals("leader"))
+                              .collect(Collectors.toList());
+            var node = RaftEngine.getInstance().getRaftNode();
+
+            if (leaders.size() == 1) {
+                var leaderPeer = leaders.get(0).getValue();
+                // change leader
+                var peers = new HashSet<>(node.listPeers());
+
+                if (!peerEquals(leaderPeer, node.getLeaderId())) {
+                    if (peers.contains(leaderPeer)) {
+                        log.info("updatePdRaft, transfer to {}", leaderPeer);
+                        node.transferLeadershipTo(leaderPeer);
+                    } else {
+                        response = Pdpb.UpdatePdRaftResponse.newBuilder()
+                                                            .setHeader(newErrorHeader(6667,
+                                                                                      "new leader" +
+                                                                                      " not in " +
+                                                                                      "raft peers"))
+                                                            .build();
+                    }
+                    break;
+                }
+            } else {
+                response = Pdpb.UpdatePdRaftResponse.newBuilder()
+                                                    .setHeader(newErrorHeader(6666,
+                                                                              "leader size != 1"))
+                                                    .build();
+                break;
+            }
+
+            Configuration config = new Configuration();
+            // add peer
+            for (var peer : list) {
+                if (!peer.getKey().equals("learner")) {
+                    config.addPeer(peer.getValue());
+                } else {
+                    config.addLearner(peer.getValue());
+                }
+            }
+
+            log.info("pd raft update with new config: {}", config);
+
+            node.changePeers(config, status -> {
+                if (status.isOk()) {
+                    log.info("updatePdRaft, change peers success");
+                } else {
+                    log.error("changePeers status: {}, msg:{}, code: {}, raft error:{}",
+                              status, status.getErrorMsg(), status.getCode(),
+                              status.getRaftError());
+                }
+            });
+        } while (false);
+
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    public void getCache(GetGraphRequest request,
+                         StreamObserver<CacheResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetCacheMethod(), request, observer);
+            return;
+        }
+        CacheResponse response;
+        try {
+            response = CacheResponse.newBuilder().mergeFrom(storeNodeService.getCache())
+                                    .setHeader(okHeader).build();
+        } catch (PDException e) {
+            log.error("get cache exception, ", e);
+            response = CacheResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    public void getPartitions(GetGraphRequest request,
+                              StreamObserver<CachePartitionResponse> observer) {
+        if (!isLeader()) {
+            redirectToLeader(PDGrpc.getGetPartitionsMethod(), request, observer);
+            return;
+        }
+        CachePartitionResponse response;
+        List<Metapb.Partition> partitions = partitionService.getPartitions(request.getGraphName());
+        response = CachePartitionResponse.newBuilder().addAllPartitions(partitions)
+                                         .setHeader(okHeader).build();
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    private List<KVPair<String, PeerId>> parseConfig(String conf) {
+        List<KVPair<String, PeerId>> result = new LinkedList<>();
+
+        if (conf != null && conf.length() > 0) {
+            for (var s : conf.split(",")) {
+                if (s.endsWith("/leader")) {
+                    result.add(new KVPair<>("leader",
+                                            JRaftUtils.getPeerId(s.substring(0, s.length() - 7))));
+                } else if (s.endsWith("/learner")) {
+                    result.add(new KVPair<>("learner",
+                                            JRaftUtils.getPeerId(s.substring(0, s.length() - 8))));
+                } else if (s.endsWith("/follower")) {
+                    result.add(new KVPair<>("follower",
+                                            JRaftUtils.getPeerId(s.substring(0, s.length() - 9))));
+                } else {
+                    result.add(new KVPair<>("follower", JRaftUtils.getPeerId(s)));
+                }
+            }
+        }
+
+        return result;
+    }
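+
+    // A minimal parsing sketch (example addresses are hypothetical): the string
+    //     "127.0.0.1:8610/leader,127.0.0.1:8611,127.0.0.1:8612/learner"
+    // yields [("leader", 127.0.0.1:8610), ("follower", 127.0.0.1:8611),
+    // ("learner", 127.0.0.1:8612)]; an address without a role suffix defaults to
+    // "follower", and the stripped suffix lengths (7, 8, 9) match "/leader",
+    // "/learner" and "/follower" respectively.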
+
+    private boolean peerEquals(PeerId p1, PeerId p2) {
+        if (p1 == null && p2 == null) {
+            return true;
+        }
+        if (p1 == null || p2 == null) {
+            return false;
+        }
+        return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort());
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java
new file mode 100644
index 0000000..ac5dfd2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc;
+import org.apache.hugegraph.pd.grpc.watch.WatchRequest;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.watch.PDWatchSubject;
+import org.lognet.springboot.grpc.GRpcService;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@GRpcService
+public class PDWatchService extends HgPdWatchGrpc.HgPdWatchImplBase {
+
+    @Override
+    public StreamObserver<WatchRequest> watch(StreamObserver<WatchResponse> responseObserver) {
+        return PDWatchSubject.addObserver(responseObserver);
+    }
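+
+    // Clients keep a bidirectional stream open through this entry point;
+    // PDWatchSubject fans change events out to every registered observer,
+    // e.g. the PD leader change pushed via PDWatchSubject.notifyNodeChange.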
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java
new file mode 100644
index 0000000..a5e6dc8
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import org.apache.hugegraph.pd.RegistryService;
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.model.PromTargetsModel;
+import org.apache.hugegraph.pd.rest.MemberAPI;
+import org.apache.hugegraph.pd.util.HgMapCache;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Service
+@Slf4j
+public class PromTargetsService {
+
+    private final PromTargetsModel pdModel = PromTargetsModel.of()
+                                                             .addLabel("__app_name", "pd")
+                                                             .setScheme("http")
+                                                             .setMetricsPath(
+                                                                     "/actuator/prometheus");
+    private final PromTargetsModel storeModel = PromTargetsModel.of()
+                                                                .addLabel("__app_name", "store")
+                                                                .setScheme("http")
+                                                                .setMetricsPath(
+                                                                        "/actuator/prometheus");
+    private final HgMapCache<String, Set<String>> targetsCache =
+            HgMapCache.expiredOf(24 * 60 * 60 * 1000); // expires after 24 hours
+    @Autowired
+    private PDConfig pdConfig;
+    @Autowired
+    private PDService pdService;
+    private RegistryService register;
+
+    private RegistryService getRegister() {
+        if (this.register == null) {
+            this.register = new RegistryService(this.pdConfig);
+        }
+        return this.register;
+    }
+
+    public List<PromTargetsModel> getAllTargets() {
+        List<PromTargetsModel> res = new LinkedList<>();
+        List<PromTargetsModel> buf =
+                this.toModels(this.getRegister().getNodes(Query.newBuilder().build()));
+
+        if (buf != null) {
+            res.addAll(buf);
+        }
+
+        res.add(getPdTargets());
+        res.add(getStoreTargets());
+
+        return res;
+    }
+
+    /**
+     * @param appName the application name, e.g. "pd" or "store"
+     * @return the matching targets, or null if none exist
+     */
+    public List<PromTargetsModel> getTargets(String appName) {
+        HgAssert.isArgumentNotNull(appName, "appName");
+        switch (appName) {
+            case "pd":
+                return Collections.singletonList(this.getPdTargets());
+            case "store":
+                return Collections.singletonList(this.getStoreTargets());
+            default:
+                return this.toModels(this.getRegister()
+                                         .getNodes(Query.newBuilder().setAppName(appName).build()));
+        }
+    }
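+
+    // Illustrative result (addresses are hypothetical): getTargets("pd") returns a
+    // single PromTargetsModel labelled __app_name=pd with metrics path
+    // /actuator/prometheus, whose targets are the REST URLs of all known PD members.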
+
+    private PromTargetsModel getPdTargets() {
+        return setTargets(pdModel, () -> this.mergeCache("pd", getPdAddresses()));
+    }
+
+    private PromTargetsModel getStoreTargets() {
+        return setTargets(storeModel, () -> this.mergeCache("store", getStoreAddresses()));
+    }
+
+    private PromTargetsModel setTargets(PromTargetsModel model, Supplier<Set<String>> supplier) {
+        return model.setTargets(supplier.get())
+                    .setClusterId(String.valueOf(pdConfig.getClusterId()));
+    }
+
+    /* Cache targets to tolerate transient connection failures between PD and store,
+       or between PD peers. */
+    // TODO: add a scheduled task to refresh targets instead of retrieving them every time
+    private Set<String> mergeCache(String key, Set<String> set) {
+        Set<String> buf = this.targetsCache.get(key);
+
+        if (buf == null) {
+            buf = new HashSet<>();
+            this.targetsCache.put(key, buf);
+        }
+
+        if (set != null) {
+            buf.addAll(set);
+        }
+
+        return buf;
+    }
+
+    private List<PromTargetsModel> toModels(NodeInfos info) {
+        if (info == null) {
+            return null;
+        }
+
+        List<NodeInfo> nodes = info.getInfoList();
+        if (nodes == null || nodes.isEmpty()) {
+            return null;
+        }
+
+        List<PromTargetsModel> res =
+                nodes.stream().map(e -> {
+                         Map<String, String> labels = e.getLabelsMap();
+
+                         String target = labels.get("target");
+                         if (HgAssert.isInvalid(target)) {
+                             return null;
+                         }
+
+                         PromTargetsModel model = PromTargetsModel.of();
+                         model.addTarget(target);
+                         model.addLabel("__app_name", e.getAppName());
+
+                         labels.forEach((k, v) -> {
+                             k = k.trim();
+                             switch (k) {
+                                 case "metrics":
+                                     model.setMetricsPath(v.trim());
+                                     break;
+                                 case "scheme":
+                                     model.setScheme(v.trim());
+                                     break;
+                                 default:
+                                     if (k.startsWith("__")) {
+                                         model.addLabel(k, v);
+                                     }
+
+                             }
+                         });
+
+                         return model;
+                     })
+                     .filter(e -> e != null)
+                     .collect(Collectors.toList());
+
+        if (res.isEmpty()) {
+            return null;
+        }
+        return res;
+    }
+
+    private Set<String> getPdAddresses() {
+        MemberAPI.CallStreamObserverWrap<Pdpb.GetMembersResponse> response =
+                new MemberAPI.CallStreamObserverWrap<>();
+        pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response);
+        List<Metapb.Member> members = null;
+
+        try {
+            members = response.get().get(0).getMembersList();
+        } catch (Throwable e) {
+            log.error("Failed to get all pd members.", e);
+        }
+
+        Set<String> res = new HashSet<>();
+        if (members != null) {
+            members.forEach(e -> res.add(e.getRestUrl()));
+        }
+
+        return res;
+    }
+
+    private Set<String> getStoreAddresses() {
+        Set<String> res = new HashSet<>();
+        List<Metapb.Store> stores = null;
+        try {
+            stores = pdService.getStoreNodeService().getStores();
+        } catch (PDException e) {
+            log.error("Failed to get all stores.", e);
+        }
+
+        if (stores != null) {
+            stores.forEach(e -> {
+                String buf = this.getRestAddress(e);
+                if (buf != null) {
+                    res.add(buf);
+                }
+            });
+        }
+
+        return res;
+    }
+
+    // TODO: optimize the store registry data to include the host:port of the REST server
+    private String getRestAddress(Metapb.Store store) {
+        String address = store.getAddress();
+        if (address == null || address.isEmpty()) {
+            return null;
+        }
+        try {
+            Optional<String> port = store.getLabelsList().stream().map(
+                    e -> {
+                        if ("rest.port".equals(e.getKey())) {
+                            return e.getValue();
+                        }
+                        return null;
+                    }).filter(e -> e != null).findFirst();
+
+            if (port.isPresent()) {
+                address = address.substring(0, address.indexOf(':') + 1);
+                address = address + port.get();
+            }
+        } catch (Throwable t) {
+            log.error("Failed to extract the REST address of the store, caused by:", t);
+        }
+        return address;
+    }
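+
+    // Illustrative example (hypothetical values): a store registered with address
+    // "10.81.1.10:8500" and a label {key: "rest.port", value: "8520"} resolves to
+    // the REST target "10.81.1.10:8520"; without such a label the raw store
+    // address is returned unchanged.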
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java
new file mode 100644
index 0000000..7cba93a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.raft.RaftStateListener;
+
+import io.grpc.CallOptions;
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.MethodDescriptor;
+
+public interface ServiceGrpc extends RaftStateListener {
+
+    ConcurrentHashMap<String, ManagedChannel> channels = new ConcurrentHashMap<>();
+
+    default Pdpb.ResponseHeader getResponseHeader(PDException e) {
+        Pdpb.Error error =
+                Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())
+                          .build();
+        Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build();
+        return header;
+    }
+
+    default Pdpb.ResponseHeader getResponseHeader() {
+        Pdpb.Error error = Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK).build();
+        Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build();
+        return header;
+    }
+
+    default boolean isLeader() {
+        return RaftEngine.getInstance().isLeader();
+    }
+
+    default <ReqT, RespT> void redirectToLeader(ManagedChannel channel,
+                                                MethodDescriptor<ReqT, RespT> method,
+                                                ReqT req,
+                                                io.grpc.stub.StreamObserver<RespT> observer) {
+        try {
+            String address = RaftEngine.getInstance().getLeaderGrpcAddress();
+            if ((channel = channels.get(address)) == null || channel.isTerminated() ||
+                channel.isShutdown()) {
+                synchronized (ServiceGrpc.class) {
+                    if ((channel = channels.get(address)) == null || channel.isTerminated() ||
+                        channel.isShutdown()) {
+                        while (channel != null && channel.isShutdown() && !channel.isTerminated()) {
+                            channel.awaitTermination(50, TimeUnit.MILLISECONDS);
+                        }
+                        ManagedChannel c =
+                                ManagedChannelBuilder.forTarget(address).usePlaintext().build();
+                        channels.put(address, c);
+                        channel = c;
+                    }
+                }
+            }
+            io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT),
+                                                    req, observer);
+        } catch (Exception e) {
+            // no logger is available in this interface; surface the failure directly
+            e.printStackTrace();
+        }
+    }
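+
+    // The helper above caches one ManagedChannel per leader address with
+    // double-checked locking: a lock-free map lookup first, then a synchronized
+    // re-check that waits (50 ms at a time) for a shutting-down channel to
+    // terminate before building a fresh plaintext channel.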
+
+    default <ReqT, RespT> void redirectToLeader(MethodDescriptor<ReqT, RespT> method,
+                                                ReqT req,
+                                                io.grpc.stub.StreamObserver<RespT> observer) {
+        redirectToLeader(null, method, req, observer);
+    }
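+
+    // Call sketch: a follower handling getPDConfig can simply invoke
+    //     redirectToLeader(PDGrpc.getGetPDConfigMethod(), request, observer);
+    // the unary call is then proxied over the cached channel to the raft leader
+    // and the response flows back through the caller's StreamObserver.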
+
+    @Override
+    default void onRaftLeaderChanged() {
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java
new file mode 100644
index 0000000..f99efe5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import org.apache.hugegraph.pd.KvService;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.rest.API;
+import org.apache.hugegraph.pd.upgrade.VersionScriptFactory;
+import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Useless("upgrade related")
+@Slf4j
+public class UpgradeService {
+
+    private static final String VERSION_KEY = "DATA_VERSION";
+
+    private static final String RUN_LOG_PREFIX = "SCRIPT_RUN_LOG";
+
+    private final PDConfig pdConfig;
+
+    private final KvService kvService;
+
+    public UpgradeService(PDConfig pdConfig) {
+        this.pdConfig = pdConfig;
+        this.kvService = new KvService(pdConfig);
+    }
+
+    public void upgrade() throws PDException {
+
+        log.info("upgrade service start");
+        VersionScriptFactory factory = VersionScriptFactory.getInstance();
+        var dataVersion = getDataVersion();
+        log.info("now db data version : {}", dataVersion);
+        for (VersionUpgradeScript script : factory.getScripts()) {
+            // skip scripts that have already been executed and are marked run-once
+            if (isExecuted(script.getClass().getName()) && script.isRunOnce()) {
+                log.info("Script {} is Executed and is run once", script.getClass().getName());
+                continue;
+            }
+
+            // check the skip conditions
+            if ((dataVersion == null && !script.isRunWithoutDataVersion()) ||
+                (dataVersion != null && !versionCompare(dataVersion, script.getHighVersion(),
+                                                        script.getLowVersion()))) {
+                log.info("Script {} did not match the version requirements, current data " +
+                         "version: {}, current version: {}, " +
+                         "script run version ({} to {}), run without data version: {}",
+                         script.getClass().getName(),
+                         dataVersion,
+                         API.VERSION,
+                         script.getHighVersion(),
+                         script.getLowVersion(),
+                         script.isRunWithoutDataVersion());
+                continue;
+            }
+
+            script.runInstruction(pdConfig);
+            logRun(script.getClass().getName());
+        }
+
+        writeCurrentDataVersion();
+    }
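+
+    // Overall flow: each registered script is skipped when it already ran and is
+    // marked run-once, or when the stored data version falls outside its version
+    // range; otherwise it executes and is recorded under SCRIPT_RUN_LOG, and
+    // DATA_VERSION is finally stamped with the current API version.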
+
+    private boolean isExecuted(String className) throws PDException {
+        var ret = kvService.get(RUN_LOG_PREFIX + "/" + className);
+        return ret.length() > 0;
+    }
+
+    private void logRun(String className) throws PDException {
+        kvService.put(RUN_LOG_PREFIX + "/" + className, API.VERSION);
+    }
+
+    private String getDataVersion() throws PDException {
+        return kvService.get(VERSION_KEY);
+    }
+
+    private boolean versionCompare(String dataVersion, String high, String low) {
+        var currentVersion = API.VERSION;
+        return (high.equals(VersionUpgradeScript.UNLIMITED_VERSION) ||
+                high.compareTo(dataVersion) >= 0)
+               && (low.equals(VersionUpgradeScript.UNLIMITED_VERSION) ||
+                   low.compareTo(currentVersion) <= 0);
+    }
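+
+    // Illustrative check (hypothetical versions): with dataVersion = "3.6.0",
+    // high = "3.6.2" and low = UNLIMITED_VERSION this returns true, since
+    // "3.6.2".compareTo("3.6.0") >= 0 and the low bound is unlimited. The high
+    // bound is compared against the stored data version while the low bound is
+    // compared against the current API version, both in plain lexicographic order.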
+
+    private void writeCurrentDataVersion() throws PDException {
+        log.info("update db version to {}", API.VERSION);
+        kvService.put(VERSION_KEY, API.VERSION);
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java
new file mode 100644
index 0000000..15ed5aa
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.upgrade;
+
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.upgrade.scripts.PartitionMetaUpgrade;
+import org.apache.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade;
+
+@Useless("upgrade related")
+public class VersionScriptFactory {
+
+    private static final List<VersionUpgradeScript> SCRIPTS = new LinkedList<>();
+    private static volatile VersionScriptFactory factory;
+
+    static {
+        registerScript(new PartitionMetaUpgrade());
+        registerScript(new TaskCleanUpgrade());
+    }
+
+    private VersionScriptFactory() {
+
+    }
+
+    public static VersionScriptFactory getInstance() {
+        if (factory == null) {
+            synchronized (VersionScriptFactory.class) {
+                if (factory == null) {
+                    factory = new VersionScriptFactory();
+                }
+            }
+        }
+        return factory;
+    }
+
+    public static void registerScript(VersionUpgradeScript script) {
+        SCRIPTS.add(script);
+    }
+
+    public List<VersionUpgradeScript> getScripts() {
+        return SCRIPTS;
+    }
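+
+    // Registration sketch: the built-in scripts are registered in the static block
+    // above; a new script could be added the same way, e.g.
+    //     VersionScriptFactory.registerScript(new MyCustomUpgrade());
+    // where MyCustomUpgrade is a hypothetical VersionUpgradeScript implementation.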
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java
new file mode 100644
index 0000000..d900790
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.upgrade;
+
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.config.PDConfig;
+
+@Useless("upgrade related")
+public interface VersionUpgradeScript {
+
+    String UNLIMITED_VERSION = "UNLIMITED_VERSION";
+
+    /**
+     * the highest version that needs to run the upgrade instruction
+     *
+     * @return the high version
+     */
+    String getHighVersion();
+
+    /**
+     * the lowest version that needs to run the upgrade instruction
+     *
+     * @return the low version
+     */
+    String getLowVersion();
+
+    /**
+     * whether to run when PD has no data version, generally for versions before 3.6.2
+     *
+     * @return true to run when PD has no data version
+     */
+    boolean isRunWithoutDataVersion();
+
+    /**
+     * the script runs only once, ignoring versions
+     *
+     * @return true if the script runs only once
+     */
+    boolean isRunOnce();
+
+    /**
+     * run the upgrade instruction
+     */
+    void runInstruction(PDConfig config);
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java
new file mode 100644
index 0000000..487444e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.upgrade.scripts;
+
+import java.util.HashSet;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.meta.MetadataKeyHelper;
+import org.apache.hugegraph.pd.meta.MetadataRocksDBStore;
+import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Useless("upgrade related")
+@Slf4j
+public class PartitionMetaUpgrade implements VersionUpgradeScript {
+
+    @Override
+    public String getHighVersion() {
+        return "3.6.2";
+    }
+
+    @Override
+    public String getLowVersion() {
+        return UNLIMITED_VERSION;
+    }
+
+    @Override
+    public void runInstruction(PDConfig config) {
+
+        log.info("run PartitionMetaUpgrade script");
+        var dbStore = new MetadataRocksDBStore(config);
+
+        try {
+            var partSet = new HashSet<Integer>();
+            for (var graph : dbStore.scanPrefix(Metapb.Graph.parser(),
+                                                MetadataKeyHelper.getGraphPrefix())) {
+                var graphPrefix = MetadataKeyHelper.getPartitionPrefix(graph.getGraphName());
+                for (var partition : dbStore.scanPrefix(Metapb.PartitionV36.parser(),
+                                                        graphPrefix)) {
+                    var newPartition = trans(partition);
+                    var partId = partition.getId();
+                    log.info("trans partition structure: from {} to {}", partition, newPartition);
+                    // backup
+                    var key36 = MetadataKeyHelper.getPartitionV36Key(graph.getGraphName(), partId);
+                    dbStore.put(key36, partition.toByteArray());
+                    // write new structure
+                    var key = MetadataKeyHelper.getPartitionKey(graph.getGraphName(), partId);
+                    dbStore.put(key, newPartition.toByteArray());
+
+                    // construct shard group
+                    if (!partSet.contains(partId)) {
+                        var shardGroupKey = MetadataKeyHelper.getShardGroupKey(partId);
+                        var shardGroup = dbStore.getOne(Metapb.ShardGroup.parser(), shardGroupKey);
+                        if (shardGroup == null) {
+                            var shardList = partition.getShardsList();
+                            if (shardList.size() > 0) {
+                                shardGroup = Metapb.ShardGroup.newBuilder()
+                                                              .setId(partId)
+                                                              .setVersion(partition.getVersion())
+                                                              .setConfVer(0)
+                                                              .setState(partition.getState())
+                                                              .addAllShards(shardList)
+                                                              .build();
+                                dbStore.put(shardGroupKey, shardGroup.toByteArray());
+                                log.info("extract shard group from partition, {}", shardGroup);
+                            } else {
+                                throw new PDException(1000,
+                                                      "trans partition failed, no shard list");
+                            }
+                        }
+                        partSet.add(partId);
+                    }
+
+                }
+            }
+        } catch (Exception e) {
+            log.error("script: {}, run error : {}", getClass().getName(), e.getMessage());
+        }
+    }
+
+    @Override
+    public boolean isRunOnce() {
+        return true;
+    }
+
+    @Override
+    public boolean isRunWithoutDataVersion() {
+        return true;
+    }
+
+    private Metapb.Partition trans(Metapb.PartitionV36 partition) {
+
+        return Metapb.Partition.newBuilder()
+                               .setId(partition.getId())
+                               .setGraphName(partition.getGraphName())
+                               .setStartKey(partition.getStartKey())
+                               .setEndKey(partition.getEndKey())
+                               .setVersion(partition.getVersion())
+                               .setState(partition.getState())
+                               .setMessage(partition.getMessage())
+                               .build();
+    }
+}
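
Because the script keeps the old record under a dedicated V36 backup key before writing the new structure, a rollback check stays simple; the sketch below is a hypothetical helper built only from the APIs used above:

    // Read the V36 backup back; throwing mirrors the error style used above.
    Metapb.PartitionV36 readBackup(MetadataRocksDBStore dbStore, String graphName,
                                   int partId) throws PDException {
        byte[] key36 = MetadataKeyHelper.getPartitionV36Key(graphName, partId);
        Metapb.PartitionV36 backup = dbStore.getOne(Metapb.PartitionV36.parser(), key36);
        if (backup == null) {
            throw new PDException(1000, "no V36 backup for partition " + partId);
        }
        return backup;
    }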
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java
new file mode 100644
index 0000000..b681c6f
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.upgrade.scripts;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.meta.MetadataKeyHelper;
+import org.apache.hugegraph.pd.meta.MetadataRocksDBStore;
+import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Useless("upgrade related")
+@Slf4j
+public class TaskCleanUpgrade implements VersionUpgradeScript {
+
+    @Override
+    public String getHighVersion() {
+        return UNLIMITED_VERSION;
+    }
+
+    @Override
+    public String getLowVersion() {
+        return UNLIMITED_VERSION;
+    }
+
+    @Override
+    public boolean isRunWithoutDataVersion() {
+        return true;
+    }
+
+    @Override
+    public boolean isRunOnce() {
+        return true;
+    }
+
+    @Override
+    public void runInstruction(PDConfig config) {
+        log.info("run TaskCleanUpgrade script");
+        var dbStore = new MetadataRocksDBStore(config);
+
+        try {
+            byte[] key = MetadataKeyHelper.getAllSplitTaskPrefix();
+            log.info("delete split task:{}", dbStore.removeByPrefix(key));
+            byte[] key2 = MetadataKeyHelper.getAllMoveTaskPrefix();
+            log.info("delete move task:{}", dbStore.removeByPrefix(key2));
+        } catch (PDException e) {
+            throw new RuntimeException(e);
+        }
+
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java
new file mode 100644
index 0000000..94704dd
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.util;
+
+import java.text.ParseException;
+import java.util.Date;
+
+import org.apache.commons.lang3.time.DateUtils;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+
+public class DateUtil {
+
+    private static final String DATE = "yyyy-MM-dd";
+    private static final String DATETIME = "yyyy-MM-dd HH:mm:ss";
+    private static final String DATETIME_MM = "yyyy-MM-dd HH:mm";
+    private static final String DATETIME_SSS = "yyyy-MM-dd HH:mm:ss.SSS";
+    private static final String TIME = "HH:mm";
+    private static final String TIME_SS = "HH:mm:ss";
+    private static final String SYS_DATE = "yyyy/MM/dd";
+    private static final String SYS_DATETIME = "yyyy/MM/dd HH:mm:ss";
+    private static final String SYS_DATETIME_MM = "yyyy/MM/dd HH:mm";
+    private static final String SYS_DATETIME_SSS = "yyyy/MM/dd HH:mm:ss.SSS";
+    private static final String NONE_DATE = "yyyyMMdd";
+    private static final String NONE_DATETIME = "yyyyMMddHHmmss";
+    private static final String NONE_DATETIME_MM = "yyyyMMddHHmm";
+    private static final String NONE_DATETIME_SSS = "yyyyMMddHHmmssSSS";
+    private static final String[] PATTERNS = new String[]{
+            DATE,
+            DATETIME,
+            DATETIME_MM,
+            DATETIME_SSS,
+            TIME,
+            TIME_SS,
+            SYS_DATE,
+            SYS_DATETIME,
+            SYS_DATETIME_MM,
+            SYS_DATETIME_SSS,
+            NONE_DATE,
+            NONE_DATETIME,
+            NONE_DATETIME_MM,
+            NONE_DATETIME_SSS
+    };
+
+    public static String[] getDefaultPattern() {
+        return PATTERNS;
+    }
+
+    public static Date getDate(String date) throws PDException {
+        try {
+            return DateUtils.parseDate(date, getDefaultPattern());
+        } catch (ParseException e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+    }
+
+}
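
A quick usage sketch (values illustrative): DateUtils.parseDate tries the registered patterns in order, and getDate wraps any failure into a PDException:

    static void demo() throws PDException {
        Date a = DateUtil.getDate("2024-01-31");          // matches DATE
        Date b = DateUtil.getDate("2024/01/31 08:30:00"); // matches SYS_DATETIME
        Date c = DateUtil.getDate("20240131083000");      // matches NONE_DATETIME
    }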
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java
new file mode 100644
index 0000000..caf7f48
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.util;
+
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executor;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hugegraph.pd.common.HgAssert;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class HgExecutorUtil {
+
+    private static final Map<String, ThreadPoolExecutor> EXECUTOR_MAP = new ConcurrentHashMap<>();
+    private static final Executor COMMON_EXECUTOR
+            = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
+                                     60L, TimeUnit.SECONDS,
+                                     new SynchronousQueue<Runnable>(),
+                                     newThreadFactory("pd-common"));
+
+    public static void execute(Runnable command) {
+        if (command == null) {
+            return;
+        }
+        COMMON_EXECUTOR.execute(command);
+    }
+
+    public static ThreadFactory newThreadFactory(String namePrefix, int priority) {
+        HgAssert.isArgumentNotNull(namePrefix, "namePrefix");
+        return new HgThreadFactory(namePrefix, priority);
+    }
+
+    public static ThreadFactory newThreadFactory(String namePrefix) {
+        HgAssert.isArgumentNotNull(namePrefix, "namePrefix");
+        return new HgDefaultThreadFactory(namePrefix);
+    }
+
+    public static ThreadPoolExecutor getThreadPoolExecutor(String name) {
+        if (name == null) {
+            return null;
+        }
+        return EXECUTOR_MAP.get(name);
+    }
+
+    /**
+     * @see HgExecutorUtil#createExecutor(String, int, int, int)
+     */
+    @Deprecated
+    public static Executor createExecutor(String name, int coreThreads, int maxThreads) {
+        return createExecutor(name, coreThreads, maxThreads, Integer.MAX_VALUE);
+    }
+
+    public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads,
+                                                    int queueSize) {
+        ThreadPoolExecutor res = EXECUTOR_MAP.get(name);
+
+        if (res != null) {
+            return res;
+        }
+
+        synchronized (EXECUTOR_MAP) {
+            res = EXECUTOR_MAP.get(name);
+            if (res != null) {
+                return res;
+            }
+
+            BlockingQueue<Runnable> queue;
+
+            if (queueSize <= 0) {
+                queue = new SynchronousQueue<>();
+            } else {
+                queue = new LinkedBlockingQueue<>(queueSize);
+            }
+
+            res = new ThreadPoolExecutor(
+                    coreThreads,
+                    maxThreads,
+                    60L, TimeUnit.SECONDS,
+                    queue,
+                    newThreadFactory(name)
+            );
+            EXECUTOR_MAP.put(name, res);
+        }
+
+        return res;
+    }
+
+    /**
+     * A thread factory that names threads with a fixed prefix and sets a given priority.
+     */
+    static class HgThreadFactory implements ThreadFactory {
+
+        private final AtomicInteger threadNumber = new AtomicInteger(1);
+        private final String namePrefix;
+        private final int priority;
+
+        HgThreadFactory(String namePrefix, int priority) {
+            this.namePrefix = namePrefix;
+            this.priority = priority;
+        }
+
+        @Override
+        public Thread newThread(Runnable r) {
+            Thread t = new Thread(null, r,
+                                  namePrefix + "-" + threadNumber.getAndIncrement(),
+                                  0);
+            if (t.isDaemon()) {
+                t.setDaemon(false);
+            }
+            if (t.getPriority() != priority) {
+                t.setPriority(priority);
+            }
+            return t;
+        }
+    }
+
+    /**
+     * The default thread factory, which takes a thread-name prefix in its constructor.
+     */
+    static class HgDefaultThreadFactory implements ThreadFactory {
+
+        private static final AtomicInteger POOL_NUMBER = new AtomicInteger(1);
+        private final AtomicInteger threadNumber = new AtomicInteger(1);
+        private final String namePrefix;
+
+        HgDefaultThreadFactory(String threadNamePrefix) {
+            this.namePrefix = threadNamePrefix + "-" +
+                              POOL_NUMBER.getAndIncrement() +
+                              "-thread-";
+        }
+
+        @Override
+        public Thread newThread(Runnable r) {
+            Thread t = new Thread(null, r,
+                                  namePrefix + threadNumber.getAndIncrement(),
+                                  0);
+            if (t.isDaemon()) {
+                t.setDaemon(false);
+            }
+            if (t.getPriority() != Thread.NORM_PRIORITY) {
+                t.setPriority(Thread.NORM_PRIORITY);
+            }
+            return t;
+        }
+    }
+}
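
A usage sketch (pool name and sizes illustrative): executors are cached by name, so repeated calls with the same name return the same pool:

    // First call creates and caches the pool; later calls just look it up.
    ThreadPoolExecutor pool = HgExecutorUtil.createExecutor("pd-task", 4, 16, 1024);
    pool.execute(() -> System.out.println(Thread.currentThread().getName()));
    assert pool == HgExecutorUtil.getThreadPoolExecutor("pd-task");

    // Fire-and-forget on the shared cached "pd-common" pool:
    HgExecutorUtil.execute(() -> System.out.println("common task"));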
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java
new file mode 100644
index 0000000..e318791
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.util;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Supplier;
+
+/**
+ * A map-based cache whose whole content expires at once, driven by a
+ * pluggable expiry policy.
+ *
+ * @param <K> the key type
+ * @param <V> the value type
+ */
+public class HgMapCache<K, V> {
+
+    private final Map<K, V> cache = new ConcurrentHashMap<K, V>();
+    private final Supplier<Boolean> expiry;
+
+    private HgMapCache(Supplier<Boolean> expiredPolicy) {
+        this.expiry = expiredPolicy;
+    }
+
+    public static <K, V> HgMapCache<K, V> expiredOf(long interval) {
+        return new HgMapCache<>(new CycleIntervalPolicy(interval));
+    }
+
+    private boolean isExpired() {
+        if (expiry != null && expiry.get()) {
+            cache.clear();
+            return true;
+        }
+        return false;
+    }
+
+    public void put(K key, V value) {
+        if (key == null || value == null) {
+            return;
+        }
+        this.cache.put(key, value);
+    }
+
+    public V get(K key) {
+        if (isExpired()) {
+            return null;
+        }
+        return this.cache.get(key);
+    }
+
+    public void removeAll() {
+        this.cache.clear();
+    }
+
+    public boolean remove(K key) {
+        if (key != null) {
+            this.cache.remove(key);
+            return true;
+        }
+        return false;
+    }
+
+    public Map<K, V> getAll() {
+        return this.cache;
+    }
+
+    private static class CycleIntervalPolicy implements Supplier<Boolean> {
+
+        private long expireTime = 0;
+        private long interval = 0;
+
+        public CycleIntervalPolicy(long interval) {
+            this.interval = interval;
+            init();
+        }
+
+        private void init() {
+            expireTime = System.currentTimeMillis() + interval;
+        }
+
+        @Override
+        public Boolean get() {
+            if (System.currentTimeMillis() > expireTime) {
+                init();
+                return true;
+            }
+            return false;
+        }
+
+    }
+
+}
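
A usage sketch (interval illustrative): the whole map is invalidated at once when the interval elapses, and the check happens lazily inside get():

    HgMapCache<String, Long> cache = HgMapCache.expiredOf(60_000L);
    cache.put("graph-count", 42L);
    Long hit = cache.get("graph-count"); // non-null while the interval lasts
    // after ~60s the next get() clears the map and returns null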
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java
new file mode 100644
index 0000000..ed05ace
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.util;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class IdUtil {
+
+    private static final byte[] LOCK = new byte[0];
+
+    public static String createMillisStr() {
+        return String.valueOf(createMillisId());
+    }
+
+    /**
+     * Create a millisecond-style ID; sleeping 1 ms while holding the lock
+     * guarantees that two calls in the same JVM never return the same value.
+     *
+     * @return the current time in milliseconds
+     */
+    public static Long createMillisId() {
+        synchronized (LOCK) {
+            try {
+                Thread.sleep(1);
+            } catch (InterruptedException e) {
+                log.error("Failed to sleep", e);
+            }
+
+            return System.currentTimeMillis();
+        }
+
+    }
+}
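
The 1 ms sleep while holding the lock is what guarantees uniqueness: no two calls in the same JVM can observe the same millisecond. A trivial check:

    long first = IdUtil.createMillisId();
    long second = IdUtil.createMillisId();
    assert second > first; // each call advances the clock by at least 1 ms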
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java
new file mode 100644
index 0000000..be8f98e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.util.grpc;
+
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.util.HgExecutorUtil;
+import org.lognet.springboot.grpc.GRpcServerBuilderConfigurer;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+import io.grpc.ServerBuilder;
+
+@Component
+public class GRpcServerConfig extends GRpcServerBuilderConfigurer {
+
+    public static final String EXECUTOR_NAME = "hg-grpc";
+    @Autowired
+    private PDConfig pdConfig;
+
+    @Override
+    public void configure(ServerBuilder<?> serverBuilder) {
+        serverBuilder.executor(
+                HgExecutorUtil.createExecutor(EXECUTOR_NAME,
+                                              pdConfig.getThreadPoolGrpc().getCore(),
+                                              pdConfig.getThreadPoolGrpc().getMax(),
+                                              pdConfig.getThreadPoolGrpc().getQueue())
+        );
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java
new file mode 100644
index 0000000..1eb995e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.util.grpc;
+
+import java.lang.reflect.Field;
+
+import io.grpc.Grpc;
+import io.grpc.ServerCall;
+import io.grpc.stub.StreamObserver;
+
+public class StreamObserverUtil {
+
+    private static final Object FIELD_LOCK = new Object();
+    private static volatile Field callField;
+
+    public static String getRemoteIP(StreamObserver observer) {
+        String ip = "";
+        try {
+            if (callField == null) {
+                synchronized (FIELD_LOCK) {
+                    if (callField == null) {
+                        callField = observer.getClass().getDeclaredField("call");
+                        callField.setAccessible(true);
+                    }
+                }
+            }
+            ServerCall call = (ServerCall) callField.get(observer);
+            if (call != null) {
+                ip = call.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR).toString();
+            }
+        } catch (Exception e) {
+            // ignore: fall back to an empty IP if reflection fails
+        }
+        return ip;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java
new file mode 100644
index 0000000..3e2f0b5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+
+import com.google.protobuf.util.JsonFormat;
+
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@ThreadSafe
+@Slf4j
+abstract class AbstractWatchSubject {
+
+    private final Map<Long, StreamObserver<WatchResponse>> watcherHolder = new HashMap<>(1024);
+    private final byte[] lock = new byte[0];
+    private final WatchResponse.Builder builder = WatchResponse.newBuilder();
+    private final WatchType watchType;
+
+    protected AbstractWatchSubject(WatchType watchType) {
+        this.watchType = watchType;
+    }
+
+    void addObserver(Long watcherId, StreamObserver<WatchResponse> responseObserver) {
+        synchronized (this.watcherHolder) {
+
+            if (this.watcherHolder.containsKey(watcherId)) {
+                responseObserver.onError(
+                        new Exception(
+                                "The watcher-id[" + watcherId + "] of " + this.watchType.name()
+                                + " subject has been existing, please unwatch it first"));
+                return;
+            }
+
+            log.info("Adding a " + this.watchType + "'s watcher, watcher-id is [" + watcherId +
+                     "].");
+            this.watcherHolder.put(watcherId, responseObserver);
+        }
+
+    }
+
+    void removeObserver(Long watcherId, StreamObserver<WatchResponse> responseObserver) {
+        synchronized (this.watcherHolder) {
+            log.info("Removing a " + this.watchType + "'s watcher, watcher-id is [" + watcherId +
+                     "].");
+            this.watcherHolder.remove(watcherId);
+        }
+        responseObserver.onCompleted();
+    }
+
+    abstract String toNoticeString(WatchResponse res);
+
+    public void notifyError(int code, String message) {
+        synchronized (lock) {
+            Iterator<Map.Entry<Long, StreamObserver<WatchResponse>>> iter =
+                    watcherHolder.entrySet().iterator();
+            while (iter.hasNext()) {
+                Map.Entry<Long, StreamObserver<WatchResponse>> entry = iter.next();
+                Long watcherId = entry.getKey();
+                WatchResponse res = this.builder.setWatcherId(watcherId).build();
+                try {
+                    entry.getValue().onError(Status.fromCodeValue(code).withDescription(message)
+                                                   .asRuntimeException());
+                } catch (Throwable e) {
+                    // log.error("Failed to send " + this.watchType.name() + "'s error message ["
+                    // + toNoticeString(res)
+                    //        + "] to watcher[" + watcherId + "].", e);
+                }
+            }
+        }
+    }
+
+    protected void notifyWatcher(WatchResponse.Builder response) {
+
+        Iterator<Map.Entry<Long, StreamObserver<WatchResponse>>> iter = watcherHolder
+                .entrySet().iterator();
+        while (iter.hasNext()) {
+            Map.Entry<Long, StreamObserver<WatchResponse>> entry = iter.next();
+            Long watcherId = entry.getKey();
+            WatchResponse res = response.setWatcherId(watcherId).build();
+            try {
+                synchronized (lock) {
+                    entry.getValue().onNext(res);
+                }
+            } catch (Throwable e) {
+                try {
+                    String msg = JsonFormat.printer().print(res);
+                    log.error("Failed to send notice [{}] ({}) to watcher [{}]",
+                              msg, toNoticeString(res), watcherId, e);
+                } catch (Exception ex) {
+                    // ignore: the response could not even be printed
+                }
+            }
+        }
+    }
+
+    protected void notifyWatcher(Consumer<WatchResponse.Builder> c) {
+        synchronized (lock) {
+
+            if (c == null) {
+                log.error(this.watchType.name() +
+                          "'s notice was abandoned, caused by: notifyWatcher(null)");
+                return;
+            }
+
+            try {
+                c.accept(this.builder.clear());
+            } catch (Throwable t) {
+                log.error(this.watchType.name() + "'s notice was abandoned, caused by:", t);
+                return;
+            }
+
+            Iterator<Map.Entry<Long, StreamObserver<WatchResponse>>> iter =
+                    watcherHolder.entrySet().iterator();
+
+            while (iter.hasNext()) {
+                Map.Entry<Long, StreamObserver<WatchResponse>> entry = iter.next();
+                Long watcherId = entry.getKey();
+                WatchResponse res = this.builder.setWatcherId(watcherId).build();
+
+                try {
+                    entry.getValue().onNext(res);
+                } catch (Throwable e) {
+                    log.error("Failed to send " + this.watchType.name() + "'s notice[" +
+                              toNoticeString(res)
+                              + "] to watcher[" + watcherId + "].", e);
+
+                    // TODO: ? try multi-times?
+                    iter.remove();
+
+                    log.error("Removed a " + this.watchType.name() + "'s watcher[" + entry.getKey()
+                              + "], because of once failure of sending.", e);
+                }
+
+            }
+
+        }
+
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java
new file mode 100644
index 0000000..f0109a6
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.function.BiPredicate;
+
+import org.apache.hugegraph.pd.KvService;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchKv;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchState;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.store.RaftKVStore;
+
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Handles watch subscriptions and their responses.
+ **/
+@Slf4j
+public class KvWatchSubject {
+
+    public static final String KEY_DELIMITER = "KW";
+    public static final String PREFIX_DELIMITER = "PW";
+    public static final String ALL_PREFIX = "W";
+    public static final long WATCH_TTL = 20000L;
+    private static final ConcurrentMap<String, StreamObserver<WatchResponse>> clients =
+            new ConcurrentHashMap<>();
+    private final KvService kvService;
+    BiPredicate<String, String> equal = String::equals;
+    BiPredicate<String, String> startWith = String::startsWith;
+
+    /**
+     * The following three groups of keys are used:
+     * clients -> W@KW@key@clientId
+     * rocksdb key1 -> W@KW@key@clientId
+     * rocksdb key2 -> W@clientId@KW@key@clientId
+     **/
+    public KvWatchSubject(PDConfig pdConfig) {
+        this.kvService = new KvService(pdConfig);
+    }
+
+    public String getWatchKey(String key, String watchDelimiter) {
+        return KvService.getKeyWithoutPrefix(ALL_PREFIX, watchDelimiter, key);
+    }
+
+    private void addWatchKey(String key, String delimiter, long clientId) throws PDException {
+        String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId);
+        kvService.put(watchKey, "", WATCH_TTL);
+        String clientFirstKey =
+                KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key, clientId);
+        kvService.put(clientFirstKey, "", WATCH_TTL);
+    }
+
+    private void removeWatchKey(String key, String delimiter, long clientId) throws PDException {
+        String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId);
+        kvService.delete(watchKey);
+        String clientFirstKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key);
+        kvService.deleteWithPrefix(clientFirstKey);
+    }
+
+    /**
+     * Add an observer.
+     *
+     * @param key       the key to watch
+     * @param clientId  the client identifier
+     * @param observer  the stream observer to notify
+     * @param delimiter the watch-type delimiter, which distinguishes prefix
+     *                  watches from exact-key watches
+     * @throws PDException if the watch keys cannot be stored
+     */
+    public void addObserver(String key, long clientId, StreamObserver<WatchResponse> observer,
+                            String delimiter) throws PDException {
+        String keyWithoutPrefix =
+                KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId);
+        clients.putIfAbsent(keyWithoutPrefix, observer);
+        addWatchKey(key, delimiter, clientId);
+        log.info("client:{},start to watch key:{}", clientId, key);
+    }
+
+    public void removeObserver(String key, long clientId, String delimiter) throws PDException {
+        removeWatchKey(key, delimiter, clientId);
+        String keyWithoutPrefix =
+                KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId);
+        clients.remove(keyWithoutPrefix);
+    }
+
+    /**
+     * Notify the observers. Both key and prefix watches use this method; only
+     * the predicate differs.
+     *
+     * @param key       the key that changed
+     * @param watchType the watch type, typically add or delete
+     * @param predicate equality or prefix matching, used to select key or
+     *                  prefix watches
+     * @param kvs       the changed key-value pairs
+     * @throws PDException if the registered watch keys cannot be scanned
+     */
+    public void notifyObserver(String key, WatchType watchType,
+                               BiPredicate<String, String> predicate,
+                               WatchKv... kvs) throws PDException {
+        boolean isEqual = predicate.equals(equal);
+        String watchDelimiter = isEqual ? KEY_DELIMITER : PREFIX_DELIMITER;
+        String watchKeyPrefix = isEqual ? key : "";
+        String storeKey = getWatchKey(watchKeyPrefix, watchDelimiter);
+        Map<String, String> map = kvService.scanWithPrefix(storeKey);
+        String delimiter = String.valueOf(KvService.KV_DELIMITER);
+        WatchResponse watchResponse;
+        for (String keyAndClient : map.keySet()) {
+            String[] values = keyAndClient.split(delimiter);
+            assert values.length == 4;
+            String watchKey = values[2];
+            String c = values[3];
+            long clientId = Long.parseLong(c);
+            LinkedList<WatchEvent> watchEvents = new LinkedList<>();
+            for (WatchKv kv : kvs) {
+                String kvKey = kv.getKey();
+                boolean match = predicate.test(kvKey, watchKey);
+                if (!match) {
+                    continue;
+                }
+                WatchKv watchKv =
+                        WatchKv.newBuilder().setKey(kvKey).setValue(kv.getValue()).build();
+                WatchEvent event =
+                        WatchEvent.newBuilder().setCurrent(watchKv).setType(watchType).build();
+                watchEvents.add(event);
+            }
+            StreamObserver<WatchResponse> observer = clients.get(keyAndClient);
+            watchResponse =
+                    WatchResponse.newBuilder().setState(WatchState.Started).setClientId(clientId)
+                                 .addAllEvents(watchEvents).build();
+
+            try {
+                if (observer != null) {
+                    synchronized (observer) {
+                        // log.info("notifyObserver for clientId:{}", clientId);
+                        observer.onNext(watchResponse);
+                    }
+                } else {
+                    log.info("cannot find StreamObserver for clientId:{}", clientId);
+                }
+            } catch (StatusRuntimeException ignored) {
+                // ignore: the client stream has already been closed
+            } catch (Exception e) {
+                log.warn("notifyObserver with error:{}", clientId, e);
+            }
+        }
+    }
+
+    public void notifyAllObserver(String key, WatchType watchType,
+                                  WatchKv[] kvs) throws PDException {
+        notifyObserver(key, watchType, equal, kvs);
+        notifyObserver(key, watchType, startWith, kvs);
+    }
+
+    /**
+     * Keep clients alive:
+     * 1. Send an alive message to each client, with retries.
+     * 2. If the client responds, renew the two groups of keys saved earlier.
+     * 3. If it keeps failing, delete the client's data from memory and RocksDB.
+     */
+    public void keepClientAlive() {
+        WatchResponse testAlive = WatchResponse.newBuilder().setState(WatchState.Alive).build();
+        Set<Map.Entry<String, StreamObserver<WatchResponse>>> entries = clients.entrySet();
+        Map.Entry<String, StreamObserver<WatchResponse>>[] array =
+                entries.toArray(new Map.Entry[0]);
+        Arrays.stream(array).parallel().forEach(entry -> {
+            StreamObserver<WatchResponse> value = entry.getValue();
+            String key = entry.getKey();
+            String delimiter = KvService.getDelimiter();
+            String client = key.split(delimiter)[3];
+            String clientKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, client);
+            if (value == null) {
+                removeClient(null, key, clientKey);
+                // skip this entry: there is no observer to ping
+                return;
+            }
+            boolean done = false;
+            String removes = client + KvService.KV_DELIMITER;
+            for (int i = 0; i < 3; i++) {
+                try {
+                    synchronized (value) {
+                        value.onNext(testAlive);
+                    }
+                    Map<String, String> clientKeys = kvService.scanWithPrefix(clientKey);
+                    for (Map.Entry<String, String> keyEntry : clientKeys.entrySet()) {
+                        String entryKey = keyEntry.getKey();
+                        String aliveKey = entryKey.replaceFirst(removes, "");
+                        boolean keepAliveKey = kvService.keepAlive(aliveKey);
+                        boolean keepAliveEntry = kvService.keepAlive(entryKey);
+                        // log.info("keep alive client:{},{}:{},{}:{}", client, aliveKey,
+                        // keepAliveKey,
+                        //         entryKey,
+                        //         keepAliveEntry);
+                        done = true;
+                    }
+                    break;
+                } catch (Exception e) {
+                    try {
+                        Thread.sleep(100);
+                    } catch (InterruptedException ex) {
+                        log.info("keep alive client {} with error:{}", client, e);
+                    }
+                }
+            }
+            if (!done) {
+                log.info("remove client {} for no data", client);
+                removeClient(value, key, clientKey);
+            }
+        });
+    }
+
+    private void removeClient(StreamObserver<WatchResponse> value, String key, String clientKey) {
+        try {
+            log.info("remove null observer,client:", clientKey);
+            if (RaftEngine.getInstance().isLeader()) {
+                kvService.deleteWithPrefix(clientKey);
+            } else {
+                // todo: delete records via client
+                var store = kvService.getMeta().getStore();
+                if (store instanceof RaftKVStore) {
+                    ((RaftKVStore) store).doRemoveByPrefix(kvService.getStoreKey(clientKey));
+                }
+            }
+
+            if (value != null) {
+                synchronized (value) {
+                    value.onCompleted();
+                }
+            }
+            clients.remove(key);
+        } catch (PDException e) {
+            log.error("remove client with error:", e);
+        }
+    }
+
+    /**
+     * Notify clients that the leader has changed, so they reconnect.
+     */
+    public void notifyClientChangeLeader() {
+        WatchResponse response =
+                WatchResponse.newBuilder().setState(WatchState.Leader_Changed).build();
+        for (Map.Entry<String, StreamObserver<WatchResponse>> entry : clients.entrySet()) {
+            StreamObserver<WatchResponse> value = entry.getValue();
+            String key = entry.getKey();
+            String client = key.split(KvService.getDelimiter())[3];
+            String clientKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, client);
+            if (value == null) {
+                removeClient(null, key, clientKey);
+                continue;
+            }
+            for (int i = 0; i < 3; i++) {
+                try {
+                    synchronized (value) {
+                        value.onNext(response);
+                    }
+                    removeClient(value, key, clientKey);
+                    break;
+                } catch (Exception e) {
+                    try {
+                        Thread.sleep(100);
+                    } catch (InterruptedException ignored) {
+                    }
+                }
+            }
+        }
+    }
+}
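
A registration sketch (pdConfig, the client id and responseObserver are caller-supplied placeholders); PREFIX_DELIMITER selects prefix matching and KEY_DELIMITER exact matching:

    // Watch every key under "job/"; the bookkeeping keys written by
    // addWatchKey expire after WATCH_TTL (20s) unless keepClientAlive()
    // renews them.
    KvWatchSubject subject = new KvWatchSubject(pdConfig);
    subject.addObserver("job/", 1001L, responseObserver,
                        KvWatchSubject.PREFIX_DELIMITER);

    // A later put under the prefix is fanned out via notifyAllObserver(key,
    // watchType, kvs), which tries both the exact and the prefix predicates.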
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java
new file mode 100644
index 0000000..5ef1dee
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.watch.NodeEventType;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+
+/**
+ * The subject of store-node change.
+ */
+@ThreadSafe
+final class NodeChangeSubject extends AbstractWatchSubject {
+
+    NodeChangeSubject() {
+        super(WatchType.WATCH_TYPE_STORE_NODE_CHANGE);
+    }
+
+    @Override
+    String toNoticeString(WatchResponse res) {
+        String sb = "graph:" + res.getNodeResponse().getGraph() +
+                    "," +
+                    "nodeId:" + res.getNodeResponse().getNodeId();
+        return sb;
+    }
+
+    public void notifyWatcher(NodeEventType nodeEventType, String graph, long nodeId) {
+        isArgumentNotNull(nodeEventType, "nodeEventType");
+
+        super.notifyWatcher(builder -> {
+            builder.setNodeResponse(
+                    builder.getNodeResponseBuilder().clear()
+                           .setGraph(graph)
+                           .setNodeId(nodeId)
+                           .setNodeEventType(nodeEventType)
+                           .build()
+            );
+
+        });
+    }
+
+    @Override
+    public void notifyError(int code, String message) {
+        super.notifyError(code, message);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java
new file mode 100644
index 0000000..3b14372
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.watch.NodeEventType;
+import org.apache.hugegraph.pd.grpc.watch.WatchChangeType;
+import org.apache.hugegraph.pd.grpc.watch.WatchCreateRequest;
+import org.apache.hugegraph.pd.grpc.watch.WatchRequest;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@ThreadSafe
+public class PDWatchSubject implements StreamObserver<WatchRequest> {
+
+    public static final Map<String, AbstractWatchSubject> subjectHolder = new ConcurrentHashMap<>();
+    private static final byte[] lock = new byte[0];
+
+    static {
+        subjectHolder.put(WatchType.WATCH_TYPE_PARTITION_CHANGE.name(),
+                          new PartitionChangeSubject());
+        subjectHolder.put(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name(), new NodeChangeSubject());
+        subjectHolder.put(WatchType.WATCH_TYPE_GRAPH_CHANGE.name(), new NodeChangeSubject());
+        subjectHolder.put(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name(),
+                          new ShardGroupChangeSubject());
+    }
+
+    private final StreamObserver<WatchResponse> responseObserver;
+    private AbstractWatchSubject subject;
+    private Long watcherId;
+
+    private PDWatchSubject(StreamObserver<WatchResponse> responseObserver) {
+        this.responseObserver = responseObserver;
+    }
+
+    public static StreamObserver<WatchRequest> addObserver(
+            StreamObserver<WatchResponse> responseObserver) {
+        isArgumentNotNull(responseObserver, "responseObserver");
+        return new PDWatchSubject(responseObserver);
+    }
+
+    /**
+     * Notify partition change
+     *
+     * @param changeType  change type
+     * @param graph       name of graph
+     * @param partitionId id of partition
+     */
+    public static void notifyPartitionChange(ChangeType changeType, String graph, int partitionId) {
+        ((PartitionChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_PARTITION_CHANGE.name()))
+                .notifyWatcher(changeType.getGrpcType(), graph, partitionId);
+
+    }
+
+    public static void notifyShardGroupChange(ChangeType changeType, int groupId,
+                                              Metapb.ShardGroup group) {
+        ((ShardGroupChangeSubject) subjectHolder.get(
+                WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name()))
+                .notifyWatcher(changeType.getGrpcType(), groupId, group);
+    }
+
+    /**
+     * Notify store-node change
+     *
+     * @param changeType change type
+     * @param graph      name of graph
+     * @param nodeId     id of the store node
+     */
+    public static void notifyNodeChange(NodeEventType changeType, String graph, long nodeId) {
+        ((NodeChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name()))
+                .notifyWatcher(changeType, graph, nodeId);
+    }
+
+    public static void notifyChange(WatchType type,
+                                    WatchResponse.Builder builder) {
+        subjectHolder.get(type.name()).notifyWatcher(builder);
+    }
+
+    public static void notifyError(int code, String message) {
+        subjectHolder.forEach((k, v) -> {
+            v.notifyError(code, message);
+        });
+    }
+
+    private static Long createWatcherId() {
+        synchronized (lock) {
+            Thread.yield();
+            try {
+                Thread.sleep(1);
+            } catch (InterruptedException e) {
+                log.error("Failed to sleep", e);
+            }
+
+            return System.currentTimeMillis();
+        }
+
+    }
+
+    private void cancelWatcher() {
+
+        if (this.subject == null) {
+            this.responseObserver.onError(
+                    new Exception("Invoke cancel-watch before create-watch."));
+            return;
+        }
+
+        this.subject.removeObserver(this.watcherId, this.responseObserver);
+    }
+
+    private WatchType getWatchType(WatchCreateRequest request) {
+        WatchType watchType = request.getWatchType();
+
+        if (watchType.equals(WatchType.WATCH_TYPE_UNKNOWN)) {
+            this.responseObserver.onError(new Exception("unknown watch type."));
+            return null;
+        }
+
+        return watchType;
+    }
+
+    private AbstractWatchSubject getSubject(WatchType watchType) {
+        AbstractWatchSubject subject = subjectHolder.get(watchType.name());
+
+        if (subject == null) {
+            responseObserver.onError(new Exception("Unsupported watch-type: " + watchType.name()));
+            return null;
+        }
+
+        return subject;
+    }
+
+    private void addWatcher(WatchCreateRequest request) {
+        if (this.subject != null) {
+            return;
+        }
+        WatchType watchType = getWatchType(request);
+        if (watchType == null) {
+            return;
+        }
+
+        this.subject = getSubject(watchType);
+        this.watcherId = createWatcherId();
+
+        this.subject.addObserver(this.watcherId, this.responseObserver);
+    }
+
+    @Override
+    public void onNext(WatchRequest watchRequest) {
+
+        if (watchRequest.hasCreateRequest()) {
+            this.addWatcher(watchRequest.getCreateRequest());
+            return;
+        }
+
+        if (watchRequest.hasCancelRequest()) {
+            this.cancelWatcher();
+        }
+
+    }
+
+    @Override
+    public void onError(Throwable throwable) {
+        this.cancelWatcher();
+    }
+
+    @Override
+    public void onCompleted() {
+        this.cancelWatcher();
+    }
+
+    public enum ChangeType {
+        ADD(WatchChangeType.WATCH_CHANGE_TYPE_ADD),
+        ALTER(WatchChangeType.WATCH_CHANGE_TYPE_ALTER),
+        DEL(WatchChangeType.WATCH_CHANGE_TYPE_DEL),
+
+        USER_DEFINED(WatchChangeType.WATCH_CHANGE_TYPE_SPECIAL1);
+
+        private final WatchChangeType grpcType;
+
+        ChangeType(WatchChangeType grpcType) {
+            this.grpcType = grpcType;
+        }
+
+        public WatchChangeType getGrpcType() {
+            return this.grpcType;
+        }
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java
new file mode 100644
index 0000000..c7db46e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull;
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentValid;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.watch.WatchChangeType;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+
+/**
+ * The watch subject for partition change events.
+ */
+@ThreadSafe
+final class PartitionChangeSubject extends AbstractWatchSubject {
+
+    PartitionChangeSubject() {
+        super(WatchType.WATCH_TYPE_PARTITION_CHANGE);
+    }
+
+    @Override
+    String toNoticeString(WatchResponse res) {
+        return "graph:" + res.getPartitionResponse().getGraph() +
+               ",partitionId:" + res.getPartitionResponse().getPartitionId();
+    }
+
+    public void notifyWatcher(WatchChangeType changeType, String graph, int partitionId) {
+        isArgumentNotNull(changeType, "changeType");
+        isArgumentValid(graph, "graph");
+
+        super.notifyWatcher(builder -> {
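+            // Reuse the embedded partition-response builder, clearing leftovers
+            // from any previous notification before filling in this event.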
+            builder.setPartitionResponse(
+                    builder.getPartitionResponseBuilder().clear()
+                           .setGraph(graph)
+                           .setPartitionId(partitionId)
+                           .setChangeType(changeType)
+                           .build()
+            );
+        });
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java
new file mode 100644
index 0000000..3c6dd32
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.watch.WatchChangeType;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+
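+/**
+ * The watch subject for shard group change events.
+ */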
+public class ShardGroupChangeSubject extends AbstractWatchSubject {
+
+    protected ShardGroupChangeSubject() {
+        super(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE);
+    }
+
+    @Override
+    String toNoticeString(WatchResponse res) {
+        return "shard group:" +
+               res.getShardGroupResponse().getShardGroup().toString().replace("\n", " ");
+    }
+
+    public void notifyWatcher(WatchChangeType changeType, int groupId,
+                              Metapb.ShardGroup shardGroup) {
+        isArgumentNotNull(changeType, "changeType");
+
+        super.notifyWatcher(builder -> {
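+            // Same pattern as the partition subject: clear and refill the embedded
+            // shard-group builder before broadcasting.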
+            builder.setShardGroupResponse(
+                    builder.getShardGroupResponseBuilder().clear()
+                           .setShardGroupId(groupId)
+                           .setType(changeType)
+                           .setShardGroup(shardGroup)
+                           .build()
+            );
+        });
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/application.yml b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml
new file mode 100644
index 0000000..25471b6
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml
@@ -0,0 +1,80 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8686
+  # gRPC service address
+  host: 127.0.0.1
+  netty-server:
+    max-inbound-message-size: 100MB
+
+license:
+  verify-path: 'conf/verify-license.json'
+  license-path: 'conf/hugegraph.license'
+
+server:
+  port: 8620
+
+pd:
+  # Interval in seconds for the periodic cluster health patrol
+  patrol-interval: 300
+  # Data storage path
+  data-path: tmp/pd/8610
+
+  # Minimum number of stores; below this count the cluster stops accepting writes
+  initial-store-count: 1
+  # Initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8502
+
+raft:
+  # Local raft service address
+  address: 127.0.0.1:8610
+  # PD cluster peer addresses
+  peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+  # Raft RPC read/write timeout in milliseconds
+  rpc-timeout: 10000
+  # Snapshot generation interval in seconds
+  snapshotInterval: 300
+  metrics: true
+store:
+  # Store heartbeat timeout (s); past it the store is deemed temporarily unavailable and leaders move to other replicas
+  keepAlive-timeout: 300
+  # Store offline threshold (s); past it the store is deemed permanently unavailable and replicas are reassigned
+  max-down-time: 86400
+partition:
+  # Default number of replicas per partition
+  default-shard-count: 3
+  # Default max shards per machine; initial partition count = store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 12
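+  # e.g. with a single store: 12 * 1 / 3 = 4 initial partitions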
+
+discovery:
+  # Max missed heartbeats after a client registers; beyond it the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/banner.txt b/hugegraph-pd/hg-pd-service/src/main/resources/banner.txt
new file mode 100644
index 0000000..27babf0
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/resources/banner.txt
@@ -0,0 +1,6 @@
+  _    _  _____        _____  _____
+ | |  | |/ ____|      |  __ \|  __ \
+ | |__| | |  __ ______| |__) | |  | |
+ |  __  | | |_ |______|  ___/| |  | |
+ | |  | | |__| |      | |    | |__| |
+ |_|  |_|\_____|      |_|    |_____/
diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml b/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml
new file mode 100644
index 0000000..228f4d0
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<!-- Config is auto-reloaded every 60s -->
+<configuration status="error" monitorInterval="60">
+    <properties>
+        <property name="LOG_PATH">logs</property>
+        <property name="FILE_NAME">hugegraph-pd</property>
+    </properties>
+
+    <appenders>
+        <Console name="console" target="SYSTEM_OUT">
+            <ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+        </Console>
+
+        <!-- Normal server log config -->
+        <RollingRandomAccessFile name="file" fileName="${LOG_PATH}/${FILE_NAME}.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/${FILE_NAME}-%d{yyyy-MM-dd}-%i.log"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+            <!--JsonLayout compact="true" eventEol="true" complete="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}"/>
+            </JsonLayout-->
+            <!-- Trigger rollover after exceeding 1 day or 128MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="128MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files & auto delete after exceeding 2GB or 100 files -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <!-- Limit log amount & size -->
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="2GB" />
+                        <IfAccumulatedFileCount exceeds="100" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+
+        <!-- jraft server log config -->
+        <RollingRandomAccessFile name="raft_file" fileName="${LOG_PATH}/${FILE_NAME}_raft.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/${FILE_NAME}-%d{yyyy-MM-dd}-%i.log"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+            <!--JsonLayout compact="true" eventEol="true" complete="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}"/>
+            </JsonLayout-->
+            <!-- Trigger rollover after exceeding 1 day or 128MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="128MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files & auto delete after exceeding 2GB or 100 files -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <!-- Limit log amount & size -->
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="2GB" />
+                        <IfAccumulatedFileCount exceeds="100" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+
+        <!-- Separate & compress audit log, buffer size is 512KB -->
+        <RollingRandomAccessFile name="audit" fileName="${LOG_PATH}/audit-${FILE_NAME}.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/audit-${FILE_NAME}-%d{yyyy-MM-dd-HH}-%i.gz"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <!-- Use simple format for audit log to speed up -->
+            <!-- PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} - %m%n"/ -->
+            <JsonLayout compact="true" eventEol="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}" />
+            </JsonLayout>
+            <!-- Trigger rollover after exceeding 1 hour or 512MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="512MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files & delete archives older than 60 days once they exceed 5GB or 500 files -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.gz" />
+                    <IfLastModified age="60d" />
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="5GB" />
+                        <IfAccumulatedFileCount exceeds="500" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+    </appenders>
+
+    <loggers>
+        <root level="INFO">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </root>
+        <logger name="com.alipay.sofa" level="INFO" additivity="false">
+            <appender-ref ref="raft_file" />
+            <appender-ref ref="console" />
+        </logger>
+        <logger name="io.netty" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </logger>
+        <logger name="org.apache.commons" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </logger>
+        <!-- Use mixed async way to output logs -->
+        <AsyncLogger name="org.apache.hugegraph" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </AsyncLogger>
+    </loggers>
+</configuration>
diff --git a/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer0.java b/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer0.java
new file mode 100644
index 0000000..ab4c3b4
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer0.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.live;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.boot.HugePDServer;
+import org.springframework.boot.SpringApplication;
+
+public class PDServer0 {
+
+    static String SERVER_NAME = "server0";
+    static String DATA_PATH = "tmp/8686";
+
+    public static void main(String[] args) {
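+        // Uncomment to wipe data left over from a previous run: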
+        //deleteDirectory(new File(DATA_PATH));
+
+        SpringApplication.run(HugePDServer.class,
+                              String.format("--spring.profiles.active=%s", SERVER_NAME));
+        System.out.println(SERVER_NAME + " started.");
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to start ....,%s%n", e.getMessage());
+        }
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer1.java b/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer1.java
new file mode 100644
index 0000000..1a30f5a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer1.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.live;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.boot.HugePDServer;
+import org.springframework.boot.SpringApplication;
+
+public class PDServer1 {
+
+    static String SERVER_NAME = "server1";
+    static String DATA_PATH = "tmp/8686";
+
+    public static void main(String[] args) {
+        deleteDirectory(new File(DATA_PATH));
+        SpringApplication.run(HugePDServer.class,
+                              String.format("--spring.profiles.active=%s", SERVER_NAME));
+        System.out.println(SERVER_NAME + " started.");
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to start ....,%s%n", e.getMessage());
+        }
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer2.java b/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer2.java
new file mode 100644
index 0000000..74e72e5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer2.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.live;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.boot.HugePDServer;
+import org.springframework.boot.SpringApplication;
+
+public class PDServer2 {
+
+    static String SERVER_NAME = "server2";
+    static String DATA_PATH = "tmp/8687";
+
+    public static void main(String[] args) {
+        // deleteDirectory(new File(DATA_PATH));
+        SpringApplication.run(HugePDServer.class,
+                              String.format("--spring.profiles.active=%s", SERVER_NAME));
+        System.out.println(SERVER_NAME + " started.");
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to start ....,%s%n", e.getMessage());
+        }
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer3.java b/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer3.java
new file mode 100644
index 0000000..30fb9a2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/java/org/apache/hugegraph/pd/live/PDServer3.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.live;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.boot.HugePDServer;
+import org.springframework.boot.SpringApplication;
+
+public class PDServer3 {
+
+    static String SERVER_NAME = "server3";
+    static String DATA_PATH = "tmp/8688";
+
+    public static void main(String[] args) {
+        //   deleteDirectory(new File(DATA_PATH));
+        SpringApplication.run(HugePDServer.class,
+                              String.format("--spring.profiles.active=%s", SERVER_NAME));
+        System.out.println(SERVER_NAME + " started.");
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to start ....,%s%n", e.getMessage());
+        }
+    }
+
+}
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml
new file mode 100644
index 0000000..5e1d63e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml
@@ -0,0 +1,71 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8686
+  netty-server:
+    max-inbound-message-size: 100MB
+
+server:
+  port: 8620
+
+pd:
+
+  patrol-interval: 3000000
+  data-path: tmp/8686
+  # Minimum number of stores; below this count the cluster stops accepting writes
+  initial-store-count: 1
+  # Initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8500
+
+raft:
+  address: 127.0.0.1:8610
+  # Raft cluster peers
+  peers-list: 127.0.0.1:8610
+  # Raft RPC read/write timeout in milliseconds
+  rpc-timeout: 10000
+  # Snapshot generation interval in seconds
+  snapshotInterval: 30000
+  metrics: true
+store:
+  # Store heartbeat timeout (s); past it the store is deemed temporarily unavailable and leaders move to other replicas
+  keepAlive-timeout: 300
+  # Store offline threshold (s); past it the store is deemed permanently unavailable and replicas are reassigned
+  max-down-time: 180000
+partition:
+  # Default number of replicas per partition
+  default-shard-count: 3
+  # Default max shards per machine; initial partition count = store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 12
+
+discovery:
+  # Max missed heartbeats after a client registers; beyond it the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
new file mode 100644
index 0000000..7cb53fe
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
@@ -0,0 +1,71 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8686
+  netty-server:
+    max-inbound-message-size: 100MB
+
+server:
+  port: 8620
+
+pd:
+
+  patrol-interval: 3000000
+  data-path: tmp/8686
+  # Minimum number of stores; below this count the cluster stops accepting writes
+  initial-store-count: 1
+  # Initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
+  #initial-store-list: 127.0.0.1:8501
+raft:
+  address: 127.0.0.1:8610
+  # Raft cluster peers
+  peers-list: 127.0.0.1:8610
+  # Raft RPC read/write timeout in milliseconds
+  rpc-timeout: 10000
+  # Snapshot generation interval in seconds
+  snapshotInterval: 30000
+  metrics: true
+store:
+  # Store heartbeat timeout (s); past it the store is deemed temporarily unavailable and leaders move to other replicas
+  keepAlive-timeout: 300
+  # Store offline threshold (s); past it the store is deemed permanently unavailable and replicas are reassigned
+  max-down-time: 180000
+partition:
+  # Default number of replicas per partition
+  default-shard-count: 3
+  # Default max shards per machine; initial partition count = store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 6
+
+discovery:
+  # Max missed heartbeats after a client registers; beyond it the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml
new file mode 100644
index 0000000..5e1dd50
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml
@@ -0,0 +1,73 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8687
+  host: 127.0.0.1
+  netty-server:
+    max-inbound-message-size: 100MB
+
+server:
+  port: 8621
+
+pd:
+  # Cluster ID, distinguishes different PD clusters
+  cluster_id: 1
+  patrol-interval: 300000
+  data-path: tmp/8687
+  enable-batch-load: true
+
+raft:
+  enable: true
+  address: 127.0.0.1:8611
+  # Raft cluster peers
+  peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+  # Raft RPC read/write timeout in milliseconds
+  rpc-timeout: 10000
+  # Snapshot generation interval in seconds
+  snapshotInterval: 300
+  metrics: true
+  # Initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
+store:
+  # Store heartbeat timeout (s); past it the store is deemed temporarily unavailable and leaders move to other replicas
+  keepAlive-timeout: 300
+  # Store offline threshold (s); past it the store is deemed permanently unavailable and replicas are reassigned
+  max-down-time: 1800
+partition:
+  # Default number of replicas per partition
+  default-shard-count: 3
+  # Default max shards per machine; initial partition count = store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 3
+
+discovery:
+  # Max missed heartbeats after a client registers; beyond it the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml
new file mode 100644
index 0000000..d2b8895
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml
@@ -0,0 +1,73 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8688
+  host: 127.0.0.1
+  netty-server:
+    max-inbound-message-size: 100MB
+
+server:
+  port: 8622
+
+pd:
+  # Cluster ID, distinguishes different PD clusters
+  cluster_id: 1
+  patrol-interval: 300000
+  data-path: tmp/8688
+  enable-batch-load: true
+
+raft:
+  enable: true
+  address: 127.0.0.1:8612
+  # Raft cluster peers
+  peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+  # Raft RPC read/write timeout in milliseconds
+  rpc-timeout: 10000
+  # Snapshot generation interval in seconds
+  snapshotInterval: 300
+  metrics: true
+  # Initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
+store:
+  # Store heartbeat timeout (s); past it the store is deemed temporarily unavailable and leaders move to other replicas
+  keepAlive-timeout: 300
+  # Store offline threshold (s); past it the store is deemed permanently unavailable and replicas are reassigned
+  max-down-time: 1800
+partition:
+  # Default number of replicas per partition
+  default-shard-count: 3
+  # Default max shards per machine; initial partition count = store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 3
+
+discovery:
+  # Max missed heartbeats after a client registers; beyond it the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/banner.txt b/hugegraph-pd/hg-pd-service/src/test/resources/banner.txt
new file mode 100644
index 0000000..27babf0
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/banner.txt
@@ -0,0 +1,6 @@
+  _    _  _____        _____  _____
+ | |  | |/ ____|      |  __ \|  __ \
+ | |__| | |  __ ______| |__) | |  | |
+ |  __  | | |_ |______|  ___/| |  | |
+ | |  | | |__| |      | |    | |__| |
+ |_|  |_|\_____|      |_|    |_____/
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml b/hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml
new file mode 100644
index 0000000..ea7d9a0
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<!-- Config is auto-reloaded every 60s -->
+<configuration status="error" monitorInterval="60">
+    <properties>
+        <property name="LOG_PATH">logs</property>
+        <property name="FILE_NAME">hugegraph-pd</property>
+    </properties>
+
+    <appenders>
+        <Console name="console" target="SYSTEM_OUT">
+            <ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+        </Console>
+
+        <!-- Normal server log config -->
+        <RollingRandomAccessFile name="file" fileName="${LOG_PATH}/${FILE_NAME}.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/${FILE_NAME}-%d{yyyy-MM-dd}-%i.log"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+            <!--JsonLayout compact="true" eventEol="true" complete="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}"/>
+            </JsonLayout-->
+            <!-- Trigger rollover after exceeding 1 day or 128MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="128MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files & auto delete after exceeding 2GB or 100 files -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <!-- Limit log amount & size -->
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="2GB" />
+                        <IfAccumulatedFileCount exceeds="100" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+
+        <!-- jraft server log config -->
+        <RollingRandomAccessFile name="raft_file" fileName="${LOG_PATH}/${FILE_NAME}_raft.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/${FILE_NAME}-%d{yyyy-MM-dd}-%i.log"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+            <!--JsonLayout compact="true" eventEol="true" complete="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}"/>
+            </JsonLayout-->
+            <!-- Trigger rollover after exceeding 1 day or 128MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="128MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files & auto delete after exceeding 2GB or 100 files -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <!-- Limit log amount & size -->
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="2GB" />
+                        <IfAccumulatedFileCount exceeds="100" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+
+        <!-- Separate & compress audit log, buffer size is 512KB -->
+        <RollingRandomAccessFile name="audit" fileName="${LOG_PATH}/audit-${FILE_NAME}.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/audit-${FILE_NAME}-%d{yyyy-MM-dd-HH}-%i.gz"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <!-- Use simple format for audit log to speed up -->
+            <!-- PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} - %m%n"/ -->
+            <JsonLayout compact="true" eventEol="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}" />
+            </JsonLayout>
+            <!-- Trigger rollover after exceeding 1 hour or 512MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="512MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files & delete archives older than 60 days once they exceed 5GB or 500 files -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.gz" />
+                    <IfLastModified age="60d" />
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="5GB" />
+                        <IfAccumulatedFileCount exceeds="500" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+    </appenders>
+
+    <loggers>
+        <root level="INFO">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </root>
+        <logger name="com.alipay.sofa" level="INFO" additivity="false">
+            <appender-ref ref="raft_file" />
+            <appender-ref ref="console" />
+        </logger>
+        <logger name="io.netty" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </logger>
+        <logger name="org.apache.commons" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </logger>
+        <!-- Use mixed async way to output logs -->
+        <AsyncLogger name="org.apache.hugegraph" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </AsyncLogger>
+    </loggers>
+</configuration>
diff --git a/hugegraph-pd/hg-pd-test/pom.xml b/hugegraph-pd/hg-pd-test/pom.xml
index f2e187c..d362127 100644
--- a/hugegraph-pd/hg-pd-test/pom.xml
+++ b/hugegraph-pd/hg-pd-test/pom.xml
@@ -109,6 +109,11 @@
             <artifactId>hg-pd-core</artifactId>
             <version>${revision}</version>
         </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-service</artifactId>
+            <version>${revision}</version>
+        </dependency>
 
         <dependency>
             <groupId>com.google.code.gson</groupId>
diff --git a/hugegraph-pd/pom.xml b/hugegraph-pd/pom.xml
index 8647775..af0ca22 100644
--- a/hugegraph-pd/pom.xml
+++ b/hugegraph-pd/pom.xml
@@ -37,8 +37,8 @@
         <module>hg-pd-client</module>
         <module>hg-pd-test</module>
         <module>hg-pd-core</module>
+        <module>hg-pd-service</module>
         <!-- TODO: uncomment later -->
-        <!-- <module>hg-pd-service</module> -->
         <!-- <module>hg-pd-dist</module> -->
     </modules>